Merge branch 'master' into 6021-add-schedule-plugin-test-docs

pr-6089
Jameelah Mercer 2025-05-28 11:50:19 +09:00 committed by GitHub
commit d0fad985c5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 2520 additions and 211 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,89 @@
---
mode: 'edit'
applyTo: "content/{influxdb3/core,influxdb3/enterprise,shared/influxdb3*}/**"
---
## Best Practices
- Use UPPERCASE for placeholders to make them easily identifiable
- Don't use pronouns in placeholders (e.g., "your", "this")
- List placeholders in the same order they appear in the code
- Provide clear descriptions including:
  - Expected data type or format
  - Purpose of the value
  - Any constraints or requirements
- Mark optional placeholders as "Optional:" in their descriptions
- Placeholder key descriptions should fit the context of the code snippet
- Include examples for complex formats
## Writing Placeholder Descriptions
Descriptions should follow consistent patterns:
1. **Admin Authentication tokens**:
- Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance"
- Avoid: "your token", "the token", "an authorization token"
2. **Database resource tokens**:
- Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}}
- Avoid: "your token", "the token", "an authorization token"
3. **Database names**:
- Recommended: "the name of the database to [action]"
- Avoid: "your database", "the database name"
4. **Conditional content**:
- Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions
- Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}"
## Common placeholders for InfluxDB 3
- `AUTH_TOKEN`: your {{% token-link %}}
- `DATABASE_NAME`: the database to use
- `TABLE_NAME`: Name of the table/measurement to query or write to
- `NODE_ID`: Node ID for a specific node in a cluster
- `CLUSTER_ID`: Cluster ID for a specific cluster
- `HOST`: InfluxDB server hostname or URL
- `PORT`: InfluxDB server port (typically 8181)
- `QUERY`: SQL or InfluxQL query string
- `LINE_PROTOCOL`: Line protocol data for writes
- `PLUGIN_FILENAME`: Name of plugin file to use
- `CACHE_NAME`: Name for a new or existing cache
## Hugo shortcodes in Markdown
- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`: Use this shortcode to define placeholders in code snippets.
- `{{% /code-placeholders %}}`: End the shortcode.
- `{{% code-placeholder-key %}}`: Use this shortcode to define a specific placeholder key.
- `{{% /code-placeholder-key %}}`: End the specific placeholder key shortcode.
## Language-Specific Placeholder Formatting
- **Bash/Shell**: Use uppercase variables with no quotes or prefix
```bash
--database DATABASE_NAME
```
- Python: Use string literals with quotes
```python
database_name='DATABASE_NAME'
```
- JSON: Use key-value pairs with quotes
```json
{
"database": "DATABASE_NAME"
}
```
## Real-World Examples from Documentation
### InfluxDB CLI Commands
This pattern appears frequently in CLI documentation:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--precision ns
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to
{{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with write permissions on the specified database{{% /show-in %}}

View File

@ -1,6 +1,6 @@
# Contributing to InfluxData Documentation
## Sign the InfluxData CLA
### Sign the InfluxData CLA
The InfluxData Contributor License Agreement (CLA) is part of the legal framework
for the open source ecosystem that protects both you and InfluxData.

View File

@ -0,0 +1,76 @@
#!/usr/bin/env node
/**
* Script to generate GitHub Copilot instructions
* for InfluxData documentation.
*/
import fs from 'fs';
import path from 'path';
import process from 'process';
import { execSync } from 'child_process';
// Get the current file path and directory
export { buildContributingInstructions };
// Entry point: run the build immediately when the script is executed.
// On failure, log the error AND set a non-zero exit code so CI and
// calling scripts can detect that generation failed (previously the
// error was logged but the process still exited 0).
(async () => {
  try {
    await buildContributingInstructions();
  } catch (error) {
    console.error('Error generating Copilot instructions:', error);
    process.exitCode = 1;
  }
})();
/**
 * Build GitHub Copilot instructions from CONTRIBUTING.md.
 *
 * Reads CONTRIBUTING.md from the repository root, prepends Copilot
 * frontmatter (the `applyTo` glob scopes the instructions) plus a short
 * purpose preamble, writes the result to
 * .github/instructions/contributing.instructions.md, and stages the
 * generated file with git when it has changed.
 */
function buildContributingInstructions() {
  const repoRoot = process.cwd();
  const sourcePath = path.join(repoRoot, 'CONTRIBUTING.md');
  const outputDir = path.join(repoRoot, '.github', 'instructions');
  const outputPath = path.join(outputDir, 'contributing.instructions.md');

  // Create the destination directory on first run.
  if (!fs.existsSync(outputDir)) {
    fs.mkdirSync(outputDir, { recursive: true });
  }

  const contributing = fs.readFileSync(sourcePath, 'utf8');

  // Prepend frontmatter and preamble; template lines stay at column 0
  // because the literal's whitespace is emitted verbatim.
  const output = `---
applyTo: "content/**/*.md, layouts/**/*.html"
---
# GitHub Copilot Instructions for InfluxData Documentation
## Purpose and scope
GitHub Copilot should help document InfluxData products
by creating clear, accurate technical content with proper
code examples, frontmatter, shortcodes, and formatting.
${contributing}`;

  fs.writeFileSync(outputPath, output);
  console.log(`✅ Generated Copilot instructions at ${outputPath}`);

  // Stage the generated file only when git reports it as modified;
  // a missing git binary or non-repo directory is non-fatal.
  try {
    const status = execSync(
      `git status --porcelain "${outputPath}"`
    ).toString();
    if (status.trim()) {
      execSync(`git add "${outputPath}"`);
      console.log('✅ Added instructions file to git staging');
    }
  } catch (error) {
    console.warn('⚠️ Could not add instructions file to git:', error.message);
  }
}

View File

@ -449,6 +449,9 @@ services:
- type: bind
source: ./content
target: /app/content
- type: bind
source: ./CONTRIBUTING.md
target: /app/CONTRIBUTING.md
volumes:
test-content:
cloud-tmp:

View File

@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md
---
<!--
The content for this page is at content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
The content for this page is at
//SOURCE content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
-->

View File

@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/schema-design.md
---
<!--
The content for this page is at content/shared/influxdb3-write-guides/best-practices/schema-design.md
The content for this page is at
//SOURCE content/shared/influxdb3-write-guides/best-practices/schema-design.md
-->

View File

@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md
---
<!--
The content for this page is at content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
The content for this page is at
//SOURCE content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
-->

View File

@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/schema-design.md
---
<!--
The content for this page is at content/shared/influxdb3-write-guides/best-practices/schema-design.md
The content for this page is at
//SOURCE content/shared/influxdb3-write-guides/best-practices/schema-design.md
-->

View File

@ -3,7 +3,8 @@ Use these tips to optimize performance and system overhead when writing data to
{{< product-name >}}.
- [Batch writes](#batch-writes)
- [Sort tags by key](#sort-tags-by-key)
{{% hide-in "enterprise,core" %}}- [Sort tags by key](#sort-tags-by-key){{% /hide-in %}}
{{% show-in "enterprise,core" %}}- [On first write, sort tags by query priority](#on-first-write-sort-tags-by-query-priority){{% /show-in %}}
- [Use the coarsest time precision possible](#use-the-coarsest-time-precision-possible)
- [Use gzip compression](#use-gzip-compression)
- [Enable gzip compression in Telegraf](#enable-gzip-compression-in-telegraf)
@ -34,6 +35,8 @@ Write data in batches to minimize network overhead when writing data to InfluxDB
> The optimal batch size is 10,000 lines of line protocol or 10 MBs, whichever
> threshold is met first.
{{% hide-in "enterprise,core" %}}
## Sort tags by key
Before writing data points to InfluxDB, sort tags by key in lexicographic order.
@ -49,6 +52,31 @@ measurement,tagC=therefore,tagE=am,tagA=i,tagD=i,tagB=think fieldKey=fieldValue
measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue 1562020262
```
{{% /hide-in %}}
{{% show-in "enterprise,core" %}}
## On first write, sort tags by query priority
The first write to a table in {{% product-name %}} determines the physical column
order in storage, and that order has a direct impact on query performance.
Columns that appear earlier are typically faster to filter and access during
query execution.
Sort your tags by query priority when performing the initial write to a table.
Place the most commonly queried tags first—those you frequently use in `WHERE`
clauses or joins—followed by less frequently queried ones. For example, if most
of your queries filter by `region` and then by `host`, structure your first
write so that `region` comes before `host`.
> [!Important]
> Column order is determined on the first write and cannot be changed afterward.
> Tags added after the first write are added last in the column sort order.
> Plan your schema with your query workload in mind to ensure the best long-term
> performance.
{{% /show-in %}}
## Use the coarsest time precision possible
{{< product-name >}} supports up to nanosecond timestamp precision. However,

View File

@ -9,6 +9,7 @@ for simpler and more performant queries.
- [Do not use duplicate names for tags and fields](#do-not-use-duplicate-names-for-tags-and-fields)
- [Maximum number of columns per table](#maximum-number-of-columns-per-table)
- [Design for performance](#design-for-performance)
{{% show-in "enterprise,core" %}}- [Sort tags by query priority](#sort-tags-by-query-priority){{% /show-in %}}
- [Avoid wide schemas](#avoid-wide-schemas)
- [Avoid sparse schemas](#avoid-sparse-schemas)
- [Table schemas should be homogenous](#table-schemas-should-be-homogenous)
@ -135,11 +136,35 @@ the performance of queries against that table.
The following guidelines help to optimize query performance:
{{% show-in "enterprise,core" %}}- [Sort tags by query priority](#sort-tags-by-query-priority){{% /show-in %}}
- [Avoid wide schemas](#avoid-wide-schemas)
- [Avoid sparse schemas](#avoid-sparse-schemas)
- [Table schemas should be homogenous](#table-schemas-should-be-homogenous)
- [Use the best data type for your data](#use-the-best-data-type-for-your-data)
{{% show-in "enterprise,core" %}}
### Sort tags by query priority
The first write to a table in {{% product-name %}} determines the physical column
order in storage, and that order has a direct impact on query performance.
Columns that appear earlier are typically faster to filter and access during
query execution.
Sort your tags by query priority when performing the initial write to a table.
Place the most commonly queried tags first—those you frequently use in `WHERE`
clauses or joins—followed by less frequently queried ones. For example, if most
of your queries filter by `region` and then by `host`, structure your first
write so that `region` comes before `host`.
> [!Important]
> Column order is determined on the first write and cannot be changed afterward.
> Tags added after the first write are added last in the column sort order.
> Plan your schema with your query workload in mind to ensure the best long-term
> performance.
{{% /show-in %}}
### Avoid wide schemas
A wide schema refers to a schema with a large number of columns (tags and fields).

View File

@ -134,10 +134,9 @@ source ~/.zshrc
#### Start InfluxDB
To start your InfluxDB instance, use the `influxdb3 serve` command
and provide the following:
To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following:
- `--object-store`: Specifies the type of Object store to use.
`--object-store`: Specifies the type of object store to use.
InfluxDB supports the following: local file system (`file`), `memory`,
S3 (and compatible services like Ceph or Minio) (`s3`),
Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
@ -148,6 +147,8 @@ and provide the following:
This forms the final part of the storage path: `<CONFIGURED_PATH>/<NODE_ID>`.
In a multi-node setup, this ID is used to reference specific nodes.
The following examples show how to start {{% product-name %}} with different object store configurations.
> [!Note]
> #### Diskless architecture
>
@ -155,15 +156,13 @@ and provide the following:
> storage alone, eliminating the need for locally attached disks.
> {{% product-name %}} can also work with only local disk storage when needed.
The following examples show how to start InfluxDB 3 with different object store configurations:
```bash
# Memory object store
# Stores data in RAM; doesn't persist data
influxdb3 serve \
--node-id host01 \
--object-store memory
```
##### Filesystem object store
Store data in a specified directory on the local filesystem.
This is the default object store type.
Replace the following with your values:
```bash
# Filesystem object store
@ -174,15 +173,11 @@ influxdb3 serve \
--data-dir ~/.influxdb3
```
To run the [Docker image](/influxdb3/core/install/#docker-image) and persist data to the filesystem, mount a volume for the Object store-for example, pass the following options:
To run the [Docker image](/influxdb3/version/install/#docker-image) and persist data to the filesystem, mount a volume for the object store-for example, pass the following options:
- `-v /path/on/host:/path/in/container`: Mounts a directory from your filesystem to the container
- `--object-store file --data-dir /path/in/container`: Uses the mount for server storage
> [!Note]
>
> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections.
> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/).
<!--pytest.mark.skip-->
```bash
@ -197,13 +192,24 @@ docker run -it \
--data-dir /path/in/container
```
> [!Note]
>
> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections.
> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/).
##### S3 object store
Store data in an S3-compatible object store.
This is useful for production deployments that require high availability and durability.
Provide your bucket name and credentials to access the S3 object store.
```bash
# S3 object store (default is the us-east-1 region)
# Specify the Object store type and associated options
# Specify the object store type and associated options
influxdb3 serve \
--node-id host01 \
--object-store s3 \
--bucket BUCKET \
--bucket OBJECT_STORE_BUCKET \
--aws-access-key AWS_ACCESS_KEY_ID \
--aws-secret-access-key AWS_SECRET_ACCESS_KEY
```
@ -215,14 +221,27 @@ influxdb3 serve \
influxdb3 serve \
--node-id host01 \
--object-store s3 \
--bucket BUCKET \
--bucket OBJECT_STORE_BUCKET \
--aws-access-key-id AWS_ACCESS_KEY_ID \
--aws-secret-access-key AWS_SECRET_ACCESS_KEY \
--aws-endpoint ENDPOINT \
--aws-allow-http
```
For more information about server options, use the CLI help:
#### Memory object store
Store data in RAM without persisting it on shutdown.
It's useful for rapid testing and development.
```bash
# Memory object store
# Stores data in RAM; doesn't persist data
influxdb3 serve \
--node-id host01 \
--object-store memory
```
For more information about server options, use the CLI help or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/serve/):
```bash
influxdb3 serve --help
@ -232,7 +251,7 @@ influxdb3 serve --help
After you have [started the server](#start-influxdb), you can create and manage tokens using the `influxdb3` CLI or the HTTP API.
{{% product-name %}} uses token-based authentication and authorization which is enabled by default when you start the server.
With authentication enabled, you must provide a token to access server actions.
With authentication enabled, you must provide a token with `influxdb3` CLI commands and HTTP API requests.
An {{% product-name %}} instance can have one _admin token_, which grants access to all CLI actions and API endpoints.
When you create a token, InfluxDB 3 returns a token string in plain text
@ -282,8 +301,54 @@ Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}
{{< /code-tabs-wrapper >}}
The command returns a token string that you can use to authenticate CLI commands and API requests.
Store your token in a secure location, as you cannot retrieve it from the database later.
For more information, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/).
For more information about tokens, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/).
#### Use tokens to authorize CLI commands and API requests
With authentication enabled (the default), {{% product-name %}} requires a
token for all `influxdb3` CLI commands and HTTP API requests.
In the following examples, replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your {{% token-link "admin" %}} string.
{{< tabs-wrapper >}}
{{% tabs %}}
[CLI](#cli-use-a-token)
[HTTP API](#api-use-a-token)
{{% /tabs %}}
{{% tab-content %}}
For `influxdb3` to use your token automatically, assign your token
to the `INFLUXDB3_AUTH_TOKEN` environment variable:
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Set the environment variable for future CLI commands
export INFLUXDB3_AUTH_TOKEN=AUTH_TOKEN
```
{{% /code-placeholders %}}
Or to authenticate a single `influxdb3` CLI command and override `$INFLUXDB3_AUTH_TOKEN`, include the `--token` option:
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Use the --token option for a single command
influxdb3 show databases --token AUTH_TOKEN
```
{{% /code-placeholders %}}
{{% /tab-content %}}
{{% tab-content %}}
To authenticate HTTP API requests, include `Bearer <TOKEN>` in the `Authorization` header value:
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Include the token in the Authorization HTTP request header
curl "http://{{< influxdb/host >}}/api/v3/configure/database" \
--header "Authorization: Bearer AUTH_TOKEN"
```
{{% /code-placeholders %}}
{{% /tab-content %}}
{{< /tabs-wrapper >}}
### Data model
@ -373,24 +438,18 @@ Use the `influxdb3 write` command to write data to a database.
In the code samples, replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: The name of the [database](/influxdb3/version/admin/databases/) to write to.
{{% show-in "core" %}}
- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/) for your {{% product-name %}} server.
{{% /show-in %}}
{{% show-in "enterprise" %}}
- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/)
with permission to write to the specified database.
{{% /show-in %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to.
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}}
##### Write data via stdin
Pass data as quoted line protocol via standard input (stdin)--for example:
{{% code-placeholders "DATABASE_NAME|TOKEN" %}}
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token TOKEN \
--token AUTH_TOKEN \
--precision ns \
--accept-partial \
'cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK"
@ -408,17 +467,21 @@ Pass the `--file` option to write line protocol you have saved to a file--for ex
[sample line protocol](#write-data-in-line-protocol-syntax) to a file named `server_data`
and then enter the following command:
{{% code-placeholders "DATABASE_NAME|TOKEN" %}}
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token TOKEN \
--token AUTH_TOKEN \
--precision ns \
--accept-partial \
--file server_data
--file path/to/server_data
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to.
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}}
### Write data using the HTTP API
{{% product-name %}} provides three write API endpoints that respond to HTTP `POST` requests.
@ -450,7 +513,7 @@ and supports the following parameters:
- `?precision=<PRECISION>`: Specify the precision of the timestamp. The default is nanosecond precision.
- request body: The line protocol data to write.
For more information about the parameters, see [Write data](/influxdb3/core/write-data/).
For more information about the parameters, see [Write data](/influxdb3/version/write-data/).
##### Example: write data using the /api/v3 HTTP API
@ -463,6 +526,7 @@ With `accept_partial=true` (default):
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw 'home,room=Sunroom temp=96
home,room=Sunroom temp="hi"'
```
@ -493,6 +557,7 @@ With `accept_partial=false`:
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw 'home,room=Sunroom temp=96
home,room=Sunroom temp="hi"'
```
@ -546,17 +611,17 @@ The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility
#### Write responses
By default, InfluxDB acknowledges writes after flushing the WAL file to the Object store (occurring every second).
By default, InfluxDB acknowledges writes after flushing the WAL file to the object store (occurring every second).
For high write throughput, you can send multiple concurrent write requests.
#### Use no_sync for immediate write responses
To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes.
When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the Object store.
When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the object store.
Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability.
- Default behavior (`no_sync=false`): Waits for data to be written to the Object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response.
- Default behavior (`no_sync=false`): Waits for data to be written to the object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response.
- With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence.
##### Immediate write using the HTTP API
@ -565,30 +630,27 @@ The `no_sync` parameter controls when writes are acknowledged--for example:
```bash
curl "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw "home,room=Sunroom temp=96"
```
##### Immediate write using the influxdb3 CLI
The `no_sync` CLI option controls when writes are acknowledged--for example:
```bash
influxdb3 write \
--bucket mydb \
--org my_org \
--token my-token \
--no-sync
```
### Create a database or table
To create a database without writing data, use the `create` subcommand--for example:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 create database mydb
influxdb3 create database DATABASE_NAME \
--token AUTH_TOKEN
```
{{% /code-placeholders %}}
To learn more about a subcommand, use the `-h, --help` flag:
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server
To learn more about a subcommand, use the `-h, --help` flag or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/create/):
```bash
influxdb3 create -h
@ -596,7 +658,7 @@ influxdb3 create -h
### Query data
InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an
InfluxDB 3 supports native SQL for querying, in addition to InfluxQL, an
SQL-like language customized for time series queries.
{{< product-name >}} limits
@ -662,12 +724,20 @@ $ influxdb3 query --database servers "SELECT DISTINCT usage_percent, time FROM c
To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 query \
--database servers \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--language influxql \
"SELECT DISTINCT usage_percent FROM cpu WHERE time >= now() - 1d"
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
### Query using the API
@ -683,18 +753,35 @@ Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `p
The following example sends an HTTP `GET` request with a URL-encoded SQL query:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/query_sql?db=servers&q=select+*+from+cpu+limit+5"
curl -G "http://{{< influxdb/host >}}/api/v3/query_sql" \
--header 'Authorization: Bearer AUTH_TOKEN' \
--data-urlencode "db=DATABASE_NAME" \
--data-urlencode "q=select * from cpu limit 5"
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
##### Example: Query passing JSON parameters
The following example sends an HTTP `POST` request with parameters in a JSON payload:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl http://{{< influxdb/host >}}/api/v3/query_sql \
--data '{"db": "server", "q": "select * from cpu limit 5"}'
--data '{"db": "DATABASE_NAME", "q": "select * from cpu limit 5"}'
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
### Query using the Python client
@ -709,24 +796,35 @@ pip install influxdb3-python
From here, you can connect to your database with the client library using just the **host** and **database name**:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```python
from influxdb_client_3 import InfluxDBClient3
client = InfluxDBClient3(
token='AUTH_TOKEN',
host='http://{{< influxdb/host >}}',
database='servers'
database='DATABASE_NAME'
)
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
The following example shows how to query using SQL, and then
use PyArrow to explore the schema and process results:
use PyArrow to explore the schema and process results.
To authorize the query, the example retrieves the {{% token-link "database" %}}
from the `INFLUXDB3_AUTH_TOKEN` environment variable.
```python
from influxdb_client_3 import InfluxDBClient3
import os
client = InfluxDBClient3(
token=os.environ.get('INFLUXDB3_AUTH_TOKEN'),
host='http://{{< influxdb/host >}}',
database='servers'
)
@ -776,12 +874,22 @@ visualization of your time series data.
{{% product-name %}} supports a **last-n values cache** which stores the last N values in a series or column hierarchy in memory. This gives the database the ability to answer these kinds of queries in under 10 milliseconds.
You can use the `influxdb3` CLI to [create a last value cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/).
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 create last_cache \
-d <DATABASE_NAME> \
-t <TABLE> \
[CACHE_NAME]
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the last values cache in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the last values cache in
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optionally, a name for the new cache
Consider the following `cpu` sample table:
@ -797,6 +905,7 @@ The following command creates a last value cache named `cpuCache`:
```bash
influxdb3 create last_cache \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
--table cpu \
--key-columns host,application \
@ -808,10 +917,11 @@ _You can create a last values cache per time series, but be mindful of high card
#### Query a last values cache
To use the LVC, call it using the `last_cache()` function in your query--for example:
To query data from the LVC, use the [`last_cache()`](/influxdb3/version/reference/sql/functions/cache/#last_cache) function in your query--for example:
```bash
influxdb3 query \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
"SELECT * FROM last_cache('cpu', 'cpuCache') WHERE host = 'Bravo';"
```
@ -819,32 +929,53 @@ influxdb3 query \
> [!Note]
> #### Only works with SQL
>
> The Last values cache only works with SQL, not InfluxQL; SQL is the default language.
> The last values cache only works with SQL, not InfluxQL; SQL is the default language.
#### Delete a Last values cache
#### Delete a last values cache
Use the `influxdb3` CLI to [delete a last values cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/)
{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 delete last_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--cache-name <CACHE_NAME>
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--cache-name CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the last values cache from
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the last values cache from
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the last values cache to delete
### Distinct values cache
Similar to the Last values cache, the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. This is useful for fast metadata lookups, which can return in under 30 milliseconds. Many of the options are similar to the last value cache.
Similar to the [last values cache](#last-values-cache), the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns.
This is useful for fast metadata lookups, which can return in under 30 milliseconds.
Many of the options are similar to the last value cache.
You can use the `influxdb3` CLI to [create a distinct values cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/).
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|COLUMNS|CACHE_NAME" %}}
```bash
influxdb3 create distinct_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--columns <COLUMNS> \
[CACHE_NAME]
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--columns COLUMNS \
CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the distinct values cache in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the distinct values cache in
- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-delimited list of columns to cache distinct values for--for example, `host,application`
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optional: a name for the new cache
Consider the following `cpu` sample table:
@ -860,6 +991,7 @@ The following command creates a distinct values cache named `cpuDistinctCache`:
```bash
influxdb3 create distinct_cache \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
--table cpu \
--columns host,application \
@ -868,10 +1000,11 @@ influxdb3 create distinct_cache \
#### Query a distinct values cache
To use the distinct values cache, call it using the `distinct_cache()` function in your query--for example:
To query data from the distinct values cache, use the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query--for example:
```bash
influxdb3 query \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
"SELECT * FROM distinct_cache('cpu', 'cpuDistinctCache')"
```
@ -885,19 +1018,28 @@ influxdb3 query \
Use the `influxdb3` CLI to [delete a distinct values cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/)
{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 delete distinct_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--cache-name <CACHE_NAME>
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--cache-name CACHE_NAME
```
{{% /code-placeholders %}}
### Python plugins and the Processing engine
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the distinct values cache from
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the distinct values cache from
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the distinct values cache to delete
The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data.
### Python plugins and the processing engine
To activate the Processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run.
The InfluxDB 3 processing engine is an embedded Python VM for running code inside the database to process and transform data.
To activate the processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the processing engine to run.
#### Plugin
@ -1011,13 +1153,24 @@ To test a plugin, do the following:
- `--lp` or `--file`: The line protocol to test
- Optional: `--input-arguments`: A comma-delimited list of `<KEY>=<VALUE>` arguments for your plugin code
```bash
influxdb3 test wal_plugin \
--lp <INPUT_LINE_PROTOCOL> \
--input-arguments "arg1=foo,arg2=bar" \
--database <DATABASE_NAME> \
<PLUGIN_FILENAME>
```
{{% code-placeholders "INPUT_LINE_PROTOCOL|INPUT_ARGS|DATABASE_NAME|AUTH_TOKEN|PLUGIN_FILENAME" %}}
```bash
influxdb3 test wal_plugin \
--lp INPUT_LINE_PROTOCOL \
--input-arguments INPUT_ARGS \
--database DATABASE_NAME \
--token AUTH_TOKEN \
PLUGIN_FILENAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`INPUT_LINE_PROTOCOL`{{% /code-placeholder-key %}}: the line protocol to test
- Optional: {{% code-placeholder-key %}}`INPUT_ARGS`{{% /code-placeholder-key %}}: a comma-delimited list of `<KEY>=<VALUE>` arguments for your plugin code--for example, `arg1=hello,arg2=world`
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to test against
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server
- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: the name of the plugin file to test
The command runs the plugin code with the test data, yields the data to the plugin code, and then responds with the plugin result.
You can quickly see how the plugin behaves, what data it would have written to the database, and any errors.
@ -1042,7 +1195,8 @@ trigger:
# Test a plugin
influxdb3 test wal_plugin \
--lp "my_measure,tag1=asdf f1=1.0 123" \
--database mydb \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
--input-arguments "arg1=hello,arg2=world" \
test.py
```
@ -1050,7 +1204,8 @@ influxdb3 test wal_plugin \
```bash
# Create a trigger that runs the plugin
influxdb3 create trigger \
-d mydb \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
--plugin test_plugin \
--trigger-spec "table:foo" \
--trigger-arguments "arg1=hello,arg2=world" \
@ -1060,8 +1215,28 @@ influxdb3 create trigger \
After you have created a plugin and trigger, enter the following command to
enable the trigger and have it run the plugin as you write data:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TRIGGER_NAME" %}}
```bash
influxdb3 enable trigger --database mydb trigger1
influxdb3 enable trigger \
--token AUTH_TOKEN \
--database DATABASE_NAME \
TRIGGER_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to enable the trigger in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: the name of the trigger to enable
For example, to enable the trigger named `trigger1` in the `sensors` database:
```bash
influxdb3 enable trigger \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
trigger1
```
For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/).

View File

@ -133,21 +133,20 @@ source ~/.zshrc
#### Start InfluxDB
To start your InfluxDB instance, use the `influxdb3 serve` command
and provide the following:
To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following:
- `--object-store`: Specifies the type of Object store to use.
- `--object-store`: Specifies the type of object store to use.
InfluxDB supports the following: local file system (`file`), `memory`,
S3 (and compatible services like Ceph or Minio) (`s3`),
Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
The default is `file`.
Depending on the object store type, you may need to provide additional options
for your object store configuration.
- `--cluster-id`: A string identifier that determines part of the storage path hierarchy. All nodes within the same cluster share this identifier. The storage path follows the pattern `<CONFIGURED_PATH>/<CLUSTER_ID>/<NODE_ID>`. In a multi-node setup, this ID is used to reference the entire cluster.
- `--node-id`: A string identifier that distinguishes individual server instances within the cluster. This forms the final part of the storage path: `<CONFIGURED_PATH>/<CLUSTER_ID>/<NODE_ID>`. In a multi-node setup, this ID is used to reference specific nodes.
- `--cluster-id`: A string identifier that determines part of the storage path hierarchy. All nodes within the same cluster share this identifier. The storage path follows the pattern `<CONFIGURED_PATH>/<CLUSTER_ID>/<NODE_ID>`. In a multi-node setup, this ID is used to reference the entire cluster.
> [!Note]
> The combined path structure `<CONFIGURED_PATH>/<CLUSTER_ID>/<NODE_ID>` ensures proper organization of data in your object store, allowing for clean separation between clusters and individual nodes.
The following examples show how to start {{% product-name %}} with different object store configurations.
> [!Note]
> #### Diskless architecture
@ -156,16 +155,15 @@ and provide the following:
> storage alone, eliminating the need for locally attached disks.
> {{% product-name %}} can also work with only local disk storage when needed.
The following examples show how to start InfluxDB 3 with different object store configurations:
> [!Note]
> The combined path structure `<CONFIGURED_PATH>/<CLUSTER_ID>/<NODE_ID>` ensures proper organization of data in your object store, allowing for clean separation between clusters and individual nodes.
```bash
# Memory object store
# Stores data in RAM; doesn't persist data
influxdb3 serve \
--node-id host01 \
--cluster-id cluster01 \
--object-store memory
```
##### Filesystem object store
Store data in a specified directory on the local filesystem.
This is the default object store type.
Replace the following with your values:
```bash
# Filesystem object store
@ -177,15 +175,12 @@ influxdb3 serve \
--data-dir ~/.influxdb3
```
To run the [Docker image](/influxdb3/enterprise/install/#docker-image) and persist data to the filesystem, mount a volume for the Object store-for example, pass the following options:
To run the [Docker image](/influxdb3/version/install/#docker-image) and persist data to the filesystem, mount a volume for the object store--for example, pass the following options:
- `-v /path/on/host:/path/in/container`: Mounts a directory from your filesystem to the container
- `--object-store file --data-dir /path/in/container`: Uses the mount for server storage
> [!Note]
>
> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections.
> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/).
<!--pytest.mark.skip-->
```bash
@ -201,15 +196,28 @@ docker run -it \
--data-dir /path/in/container
```
> [!Note]
>
> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections.
> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/).
##### S3 object store
Store data in an S3-compatible object store.
This is useful for production deployments that require high availability and durability.
Provide your bucket name and credentials to access the S3 object store.
```bash
# S3 object store (default is the us-east-1 region)
# Specify the Object store type and associated options
# Specify the object store type and associated options
influxdb3 serve \
--node-id host01 \
--cluster-id cluster01 \
--object-store s3 \
--bucket BUCKET \
--aws-access-key-id AWS_ACCESS_KEY_ID \
--bucket OBJECT_STORE_BUCKET \
--aws-access-key-id AWS_ACCESS_KEY_ID \
--aws-secret-access-key AWS_SECRET_ACCESS_KEY
```
@ -221,14 +229,28 @@ influxdb3 serve \
--node-id host01 \
--cluster-id cluster01 \
--object-store s3 \
--bucket BUCKET \
--bucket OBJECT_STORE_BUCKET \
--aws-access-key-id AWS_ACCESS_KEY_ID \
--aws-secret-access-key AWS_SECRET_ACCESS_KEY \
--aws-endpoint ENDPOINT \
--aws-allow-http
```
For more information about server options, use the CLI help:
#### Memory object store
Store data in RAM without persisting it on shutdown.
It's useful for rapid testing and development.
```bash
# Memory object store
# Stores data in RAM; doesn't persist data
influxdb3 serve \
--node-id host01 \
--cluster-id cluster01 \
--object-store memory
```
For more information about server options, use the CLI help or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/serve/):
```bash
influxdb3 serve --help
@ -236,7 +258,7 @@ influxdb3 serve --help
#### Licensing
When first starting a new instance, InfluxDB prompts you to select a license type.
When first starting a new instance, {{% product-name %}} prompts you to select a license type.
InfluxDB 3 Enterprise licenses authorize the use of the InfluxDB 3 Enterprise software and apply to a single cluster. Licenses are primarily based on the number of CPUs InfluxDB can use, but there are other limitations depending on the license type. The following InfluxDB 3 Enterprise license types are available:
@ -250,7 +272,7 @@ You can learn more on managing your InfluxDB 3 Enterprise license on the [Manage
After you have [started the server](#start-influxdb), you can create and manage tokens using the `influxdb3` CLI or the HTTP API.
{{% product-name %}} uses token-based authentication and authorization which is enabled by default when you start the server.
With authentication enabled, you must provide a token to access server actions.
With authentication enabled, you must provide a token with `influxdb3` CLI commands and HTTP API requests.
{{% product-name %}} supports the following types of tokens:
- **admin token**: Grants access to all CLI actions and API endpoints. A server can have one admin token.
@ -312,11 +334,12 @@ Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}
{{< /code-tabs-wrapper >}}
The command returns a token string that you can use to authenticate CLI commands and API requests.
Store your token in a secure location, as you cannot retrieve it from the database later.
For more information about tokens, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/).
After you have created an admin token, you can use it to create database tokens and system tokens.
For more information, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/).
#### Create a database token
To create a database token, use the `influxdb3 create token` subcommand and pass the following:
@ -394,21 +417,59 @@ For more information, see how to [Manage resource tokens](/influxdb3/version/adm
#### Use tokens to authorize CLI commands and API requests
- To authenticate `influxdb3` CLI commands, use the `--token` option or assign your
token to the `INFLUXDB3_AUTH_TOKEN` environment variable for `influxdb3` to use it automatically.
- To authenticate HTTP API requests, include `Bearer <TOKEN>` in the `Authorization` header value--for example:
#### Use tokens to authorize CLI commands and API requests
{{% code-placeholders "SYSTEM_TOKEN" %}}
With authentication enabled (the default), {{% product-name %}} requires a
token for all `influxdb3` CLI commands and HTTP API requests.
```bash
curl "http://{{< influxdb/host >}}/health" \
--header "Authorization: Bearer SYSTEM_TOKEN"
```
In the following examples, replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your {{% token-link "admin" %}} string.
{{< tabs-wrapper >}}
{{% tabs %}}
[CLI](#cli-use-a-token)
[HTTP API](#api-use-a-token)
{{% /tabs %}}
{{% tab-content %}}
For `influxdb3` to use your token automatically, assign your
token to the `INFLUXDB3_AUTH_TOKEN` environment variable:
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Set the environment variable for future CLI commands
export INFLUXDB3_AUTH_TOKEN=AUTH_TOKEN
```
{{% /code-placeholders %}}
Replace the following:
Or to authenticate a single `influxdb3` CLI command and override `$INFLUXDB3_AUTH_TOKEN`, include the `--token` option:
In your command, replace {{% code-placeholder-key %}}`SYSTEM_TOKEN`{{% /code-placeholder-key %}}: System token that grants access to system endpoints (`/health`, `/metrics`, etc.)
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Use the --token option for a single command
influxdb3 show databases --token AUTH_TOKEN
```
{{% /code-placeholders %}}
{{% /tab-content %}}
{{% tab-content %}}
To authenticate HTTP API requests, include `Bearer <TOKEN>` in the `Authorization` header value:
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Include the token in the Authorization HTTP request header
curl "http://{{< influxdb/host >}}/api/v3/configure/database" \
--header "Authorization: Bearer AUTH_TOKEN"
```
{{% /code-placeholders %}}
{{% code-placeholders "SYSTEM_TOKEN" %}}
```bash
curl "http://{{< influxdb/host >}}/health" \
--header "Authorization: Bearer SYSTEM_TOKEN"
```
{{% /code-placeholders %}}
Replace {{% code-placeholder-key %}}`SYSTEM_TOKEN`{{% /code-placeholder-key %}} with the system token string that grants access to system endpoints (`/health`, `/metrics`)
{{% /tab-content %}}
{{< /tabs-wrapper >}}
### Data model
@ -504,11 +565,11 @@ In the code samples, replace the following placeholders with your values:
Pass data as quoted line protocol via standard input (stdin)--for example:
{{% code-placeholders "DATABASE_NAME|TOKEN" %}}
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token TOKEN \
--token AUTH_TOKEN \
--precision ns \
--accept-partial \
'cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK"
@ -526,17 +587,21 @@ Pass the `--file` option to write line protocol you have saved to a file--for ex
[sample line protocol](#write-data-in-line-protocol-syntax) to a file named `server_data`
and then enter the following command:
{{% code-placeholders "DATABASE_NAME|TOKEN" %}}
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token TOKEN \
--token AUTH_TOKEN \
--precision ns \
--accept-partial \
--file server_data
--file path/to/server_data
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to.
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}}
### Write data using the HTTP API
{{% product-name %}} provides three write API endpoints that respond to HTTP `POST` requests.
@ -568,7 +633,7 @@ and supports the following parameters:
- `?precision=<PRECISION>`: Specify the precision of the timestamp. The default is nanosecond precision.
- request body: The line protocol data to write.
For more information about the parameters, see [Write data](/influxdb3/core/write-data/).
For more information about the parameters, see [Write data](/influxdb3/version/write-data/).
##### Example: write data using the /api/v3 HTTP API
@ -581,6 +646,7 @@ With `accept_partial=true` (default):
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw 'home,room=Sunroom temp=96
home,room=Sunroom temp="hi"'
```
@ -611,6 +677,7 @@ With `accept_partial=false`:
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw 'home,room=Sunroom temp=96
home,room=Sunroom temp="hi"'
```
@ -664,17 +731,17 @@ The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility
#### Write responses
By default, InfluxDB acknowledges writes after flushing the WAL file to the Object store (occurring every second).
By default, InfluxDB acknowledges writes after flushing the WAL file to the object store (occurring every second).
For high write throughput, you can send multiple concurrent write requests.
#### Use no_sync for immediate write responses
To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes.
When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the Object store.
When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the object store.
Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability.
- Default behavior (`no_sync=false`): Waits for data to be written to the Object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response.
- Default behavior (`no_sync=false`): Waits for data to be written to the object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response.
- With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence.
##### Immediate write using the HTTP API
@ -683,30 +750,27 @@ The `no_sync` parameter controls when writes are acknowledged--for example:
```bash
curl "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \
--header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \
--data-raw "home,room=Sunroom temp=96"
```
##### Immediate write using the influxdb3 CLI
The `no_sync` CLI option controls when writes are acknowledged--for example:
```bash
influxdb3 write \
--bucket mydb \
--org my_org \
--token my-token \
--no-sync
```
### Create a database or table
To create a database without writing data, use the `create` subcommand--for example:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 create database mydb
influxdb3 create database DATABASE_NAME \
--token AUTH_TOKEN
```
{{% /code-placeholders %}}
To learn more about a subcommand, use the `-h, --help` flag:
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server
To learn more about a subcommand, use the `-h, --help` flag or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/create/):
```bash
influxdb3 create -h
@ -774,12 +838,20 @@ $ influxdb3 query --database servers "SELECT DISTINCT usage_percent, time FROM c
To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 query \
--database servers \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--language influxql \
"SELECT DISTINCT usage_percent FROM cpu WHERE time >= now() - 1d"
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
### Query using the API
@ -795,18 +867,35 @@ Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `p
The following example sends an HTTP `GET` request with a URL-encoded SQL query:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl -v "http://{{< influxdb/host >}}/api/v3/query_sql?db=servers&q=select+*+from+cpu+limit+5"
curl -G "http://{{< influxdb/host >}}/api/v3/query_sql" \
--header 'Authorization: Bearer AUTH_TOKEN' \
--data-urlencode "db=DATABASE_NAME" \
--data-urlencode "q=select * from cpu limit 5"
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
##### Example: Query passing JSON parameters
The following example sends an HTTP `POST` request with parameters in a JSON payload:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl http://{{< influxdb/host >}}/api/v3/query_sql \
  --header 'Authorization: Bearer AUTH_TOKEN' \
--data '{"db": "server", "q": "select * from cpu limit 5"}'
--data '{"db": "DATABASE_NAME", "q": "select * from cpu limit 5"}'
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
### Query using the Python client
@ -821,24 +910,35 @@ pip install influxdb3-python
From here, you can connect to your database with the client library using just the **host** and **database name:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```python
from influxdb_client_3 import InfluxDBClient3
client = InfluxDBClient3(
token='AUTH_TOKEN',
host='http://{{< influxdb/host >}}',
database='servers'
database='DATABASE_NAME'
)
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
The following example shows how to query using SQL, and then
use PyArrow to explore the schema and process results:
use PyArrow to explore the schema and process results.
To authorize the query, the example retrieves the {{% token-link "database" %}}
from the `INFLUXDB3_AUTH_TOKEN` environment variable.
```python
from influxdb_client_3 import InfluxDBClient3
import os
client = InfluxDBClient3(
token=os.environ.get('INFLUXDB3_AUTH_TOKEN'),
host='http://{{< influxdb/host >}}',
database='servers'
)
@ -883,18 +983,27 @@ Set your expected database connection details on the Settings page.
From there, you can query data, browser your database schema, and do basic
visualization of your time series data.
### Last values cache
{{% product-name %}} supports a **last-n values cache** which stores the last N values in a series or column hierarchy in memory. This gives the database the ability to answer these kinds of queries in under 10 milliseconds.
You can use the `influxdb3` CLI to create a last value cache.
You can use the `influxdb3` CLI to [create a last value cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/).
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 create last_cache \
-d <DATABASE_NAME> \
-t <TABLE> \
[CACHE_NAME]
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the last values cache in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the last values cache in
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optional: a name for the new cache
Consider the following `cpu` sample table:
@ -910,6 +1019,7 @@ The following command creates a last value cache named `cpuCache`:
```bash
influxdb3 create last_cache \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
--table cpu \
--key-columns host,application \
@ -921,10 +1031,11 @@ _You can create a last values cache per time series, but be mindful of high card
#### Query a last values cache
To use the LVC, call it using the `last_cache()` function in your query--for example:
To query data from the LVC, use the [`last_cache()`](/influxdb3/version/reference/sql/functions/cache/#last_cache) function in your query--for example:
```bash
influxdb3 query \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
"SELECT * FROM last_cache('cpu', 'cpuCache') WHERE host = 'Bravo';"
```
@ -932,32 +1043,53 @@ influxdb3 query \
> [!Note]
> #### Only works with SQL
>
> The Last values cache only works with SQL, not InfluxQL; SQL is the default language.
> The last values cache only works with SQL, not InfluxQL; SQL is the default language.
#### Delete a Last values cache
#### Delete a last values cache
Use the `influxdb3` CLI to [delete a last values cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/)
{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 delete last_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--cache-name <CACHE_NAME>
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--cache-name CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the last values cache from
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the last values cache from
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the last values cache to delete
### Distinct values cache
Similar to the Last values cache, the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. This is useful for fast metadata lookups, which can return in under 30 milliseconds. Many of the options are similar to the last value cache.
Similar to the [last values cache](#last-values-cache), the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns.
This is useful for fast metadata lookups, which can return in under 30 milliseconds.
Many of the options are similar to the last value cache.
You can use the `influxdb3` CLI to [create a distinct values cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/).
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|COLUMNS|CACHE_NAME" %}}
```bash
influxdb3 create distinct_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--columns <COLUMNS> \
[CACHE_NAME]
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--columns COLUMNS \
CACHE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the distinct values cache in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the distinct values cache in
- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-separated list of columns to cache distinct values for--for example, `host,application`
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optional: a name for the new cache
Consider the following `cpu` sample table:
@ -973,6 +1105,7 @@ The following command creates a distinct values cache named `cpuDistinctCache`:
```bash
influxdb3 create distinct_cache \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
--table cpu \
--columns host,application \
@ -985,6 +1118,7 @@ To use the distinct values cache, call it using the `distinct_cache()` function
```bash
influxdb3 query \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database servers \
"SELECT * FROM distinct_cache('cpu', 'cpuDistinctCache')"
```
@ -998,19 +1132,28 @@ influxdb3 query \
Use the `influxdb3` CLI to [delete a distinct values cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/)
{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|CACHE_NAME" %}}
```bash
influxdb3 delete distinct_cache \
--database <DATABASE_NAME> \
--table <TABLE> \
--cache-name <CACHE_NAME>
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
--cache-name CACHE_NAME
```
{{% /code-placeholders %}}
### Python plugins and the Processing engine
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the distinct values cache from
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the distinct values cache from
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the distinct values cache to delete
The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data.
### Python plugins and the processing engine
To activate the Processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run.
The InfluxDB 3 processing engine is an embedded Python VM for running code inside the database to process and transform data.
To activate the processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the processing engine to run.
#### Plugin
@ -1027,7 +1170,7 @@ InfluxDB 3 provides the following types of triggers, each with specific trigger-
- **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second).
- **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring.
- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/<ENDPOINT>`.
- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/<ENDPOINT_PATH>`.
The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services.
### Test, create, and trigger plugin code
@ -1124,13 +1267,24 @@ To test a plugin, do the following:
- `--lp` or `--file`: The line protocol to test
- Optional: `--input-arguments`: A comma-delimited list of `<KEY>=<VALUE>` arguments for your plugin code
```bash
influxdb3 test wal_plugin \
--lp <INPUT_LINE_PROTOCOL> \
--input-arguments "arg1=foo,arg2=bar" \
--database <DATABASE_NAME> \
<PLUGIN_FILENAME>
```
{{% code-placeholders "INPUT_LINE_PROTOCOL|INPUT_ARGS|DATABASE_NAME|AUTH_TOKEN|PLUGIN_FILENAME" %}}
```bash
influxdb3 test wal_plugin \
--lp INPUT_LINE_PROTOCOL \
--input-arguments INPUT_ARGS \
--database DATABASE_NAME \
--token AUTH_TOKEN \
PLUGIN_FILENAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`INPUT_LINE_PROTOCOL`{{% /code-placeholder-key %}}: the line protocol to test
- Optional: {{% code-placeholder-key %}}`INPUT_ARGS`{{% /code-placeholder-key %}}: a comma-delimited list of `<KEY>=<VALUE>` arguments for your plugin code--for example, `arg1=hello,arg2=world`
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to test against
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: the name of the plugin file to test
The command runs the plugin code with the test data, yields the data to the plugin code, and then responds with the plugin result.
You can quickly see how the plugin behaves, what data it would have written to the database, and any errors.
@ -1155,7 +1309,8 @@ trigger:
# Test a plugin
influxdb3 test wal_plugin \
--lp "my_measure,tag1=asdf f1=1.0 123" \
--database mydb \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
--input-arguments "arg1=hello,arg2=world" \
test.py
```
@ -1163,7 +1318,8 @@ influxdb3 test wal_plugin \
```bash
# Create a trigger that runs the plugin
influxdb3 create trigger \
-d mydb \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
--plugin test_plugin \
--trigger-spec "table:foo" \
--trigger-arguments "arg1=hello,arg2=world" \
@ -1173,8 +1329,28 @@ influxdb3 create trigger \
After you have created a plugin and trigger, enter the following command to
enable the trigger and have it run the plugin as you write data:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TRIGGER_NAME" %}}
```bash
influxdb3 enable trigger --database mydb trigger1
influxdb3 enable trigger \
--token AUTH_TOKEN \
--database DATABASE_NAME \
TRIGGER_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to enable the trigger in
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: the name of the trigger to enable
For example, to enable the trigger named `trigger1` in the `sensors` database:
```bash
influxdb3 enable trigger \
--token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
--database sensors \
trigger1
```
For more information, see [Python plugins and the processing engine](/influxdb3/version/plugins/).
@ -1225,6 +1401,7 @@ influxdb3 serve \
--http-bind {{< influxdb/host >}} \
--aws-access-key-id <AWS_ACCESS_KEY_ID> \
--aws-secret-access-key <AWS_SECRET_ACCESS_KEY>
```
```bash
## NODE 2
@ -1241,8 +1418,8 @@ influxdb3 serve \
--object-store s3 \
--bucket influxdb-3-enterprise-storage \
--http-bind localhost:8282 \
--aws-access-key-id <AWS_ACCESS_KEY_ID> \
--aws-secret-access-key <AWS_SECRET_ACCESS_KEY>
--aws-access-key-id AWS_ACCESS_KEY_ID \
--aws-secret-access-key AWS_SECRET_ACCESS_KEY
```
After the nodes have started, querying either node returns data for both nodes, and _NODE 1_ runs compaction.
@ -1442,28 +1619,43 @@ You can use the default port `8181` for any write or query, without changing any
>
> When running multiple local instances for testing or separate nodes in production, specifying the host ensures writes and queries are routed to the correct instance.
{{% code-placeholders "(http://localhost:8585)|AUTH_TOKEN|DATABASE_NAME|QUERY" %}}
```bash
# Example variables on a query
# Example querying a specific host
# HTTP-bound Port: 8585
influxdb3 query http://localhost:8585 --database <DATABASE> "<QUERY>"
influxdb3 query \
--host http://localhost:8585 \
--token AUTH_TOKEN \
--database DATABASE_NAME "QUERY"
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`http://localhost:8585`{{% /code-placeholder-key %}}: the host and port of the node to query
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
- {{% code-placeholder-key %}}`QUERY`{{% /code-placeholder-key %}}: the SQL or InfluxQL query to run against the database
### File index settings
To accelerate performance on specific queries, you can define non-primary keys to index on, which helps improve performance for single-series queries.
This feature is only available in Enterprise and is not available in Core.
This feature is only available in {{% product-name %}} and is not available in Core.
#### Create a file index
{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|COLUMNS" %}}
```bash
# Example variables on a query
# HTTP-bound Port: 8585
influxdb3 create file_index \
--host http://localhost:8585 \
--database <DATABASE> \
--table <TABLE> \
<COLUMNS>
--token AUTH_TOKEN \
--database DATABASE_NAME \
--table TABLE_NAME \
COLUMNS
```
#### Delete a file index
@ -1471,6 +1663,14 @@ influxdb3 create file_index \
```bash
influxdb3 delete file_index \
--host http://localhost:8585 \
--database <DATABASE> \
--table <TABLE> \
--database DATABASE_NAME \
--table TABLE_NAME
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database containing the file index
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table the file index applies to
- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-separated list of columns to index on, for example, `host,application`

View File

@ -5,10 +5,13 @@
pre-commit:
parallel: true
commands:
build-copilot-instructions:
glob: "CONTRIBUTING.md"
run: yarn build:copilot-instructions
# Report linting warnings and errors, don't output files to stdout
lint-markdown:
tags: lint
glob: 'content/*.md'
glob: "{CONTRIBUTING.md,content/*.md}"
run: |
docker compose run --rm --name remark-lint remark-lint '{staged_files}'
cloud-lint:

View File

@ -41,6 +41,7 @@
},
"scripts": {
"build:pytest:image": "docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .",
"build:copilot-instructions": "node ./build-scripts/build-copilot-instructions.js",
"lint": "LEFTHOOK_EXCLUDE=test lefthook run pre-commit && lefthook run pre-push",
"pre-commit": "lefthook run pre-commit",
"test": "echo \"Run 'yarn test:e2e', 'yarn test:links', 'yarn test:codeblocks:all' or a specific test command. e2e and links test commands can take a glob of file paths to test. Some commands run automatically during the git pre-commit and pre-push hooks.\" && exit 0",