Merge branch 'master' into docs/notify-docs

docs/notify-docs
Jameelah Mercer 2025-02-26 09:44:29 -08:00 committed by GitHub
commit 9de35c16c6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 2015 additions and 158 deletions

View File

@ -244,6 +244,7 @@
&.blue {color: $b-dodger;}
&.green {color: $gr-viridian;}
&.magenta {color: $p-comet;}
&.pink {color: $br-new-magenta;}
}
h2,

View File

@ -904,6 +904,244 @@ table tr.point{
}
}
//////////////////////// SQL WINDOW FRAME UNITS EXAMPLES ///////////////////////
// Styles for the doc-example tables that illustrate SQL window frame units
// (GROUPS, RANGE, ROWS). Each variant draws colored outlines around row
// groups, the window frame, and the current row, and pins an uppercase
// label to a corner of the outline via a ::before/::after pseudo-element.
table.window-frame-units {

  // GROUPS frame unit: outline each peer-row group and label it "Row Group".
  &.groups {
    .group {
      position: relative;
      outline-style: solid;
      outline-width: 3px;
      outline-offset: -5px;
      border-radius: 10px;

      // Corner label, pinned top-left inside the group outline.
      &::before {
        content: "Row Group";
        display: block;
        padding: .25rem .5rem;
        position: absolute;
        top: 3px;
        left: 3px;
        border-radius: 4px;
        color: #fff;
        font-size: .8rem;
        font-weight: bold;
        text-transform: uppercase;
        letter-spacing: .02em;
        box-shadow: 4px 4px 4px $article-bg;
      }

      // Emphasize the two columns the example's window function operates on.
      td:nth-child(2), td:nth-child(3) {
        font-weight: bold;
        text-decoration: underline;
        text-decoration-thickness: 2px;
        text-underline-offset: 5px;
      }

      // Cycle a distinct accent color per group (label, outline, underline).
      &:nth-of-type(1) {
        &::before {background: $br-new-magenta;}
        outline-color: $br-new-magenta;
        td:nth-child(2), td:nth-child(3) {
          text-decoration-color: $br-new-magenta;
        }
      }
      &:nth-of-type(2) {
        &::before {background: $br-new-purple;}
        outline-color: $br-new-purple;
        td:nth-child(2), td:nth-child(3) {
          text-decoration-color: $br-new-purple;
        }
      }
      &:nth-of-type(3) {
        &::before {background: $b-dodger;}
        outline-color: $b-dodger;
        td:nth-child(2), td:nth-child(3) {
          text-decoration-color: $b-dodger;
        }
      }
      &:nth-of-type(4) {
        &::before {background: $b-sapphire;}
        outline-color: $b-sapphire;
        td:nth-child(2), td:nth-child(3) {
          text-decoration-color: $b-sapphire;
        }
      }
    }
  }

  // GROUPS with an explicit frame: outline both the frame container and the
  // current row; color-code underlines by row range within the frame.
  &.groups-with-frame {
    // Shared outline + label scaffolding; content/background are set per-rule below.
    .frame, tr.current-row {
      position: relative;
      outline-style: solid;
      outline-width: 3px;
      outline-offset: -5px;
      border-radius: 10px;
      &::after {
        display: block;
        padding: .25rem .5rem;
        position: absolute;
        top: 3px;
        left: 3px;
        border-radius: 4px;
        color: #fff;
        font-size: .8rem;
        font-weight: bold;
        text-transform: uppercase;
        letter-spacing: .02em;
        box-shadow: 4px 4px 4px $article-bg;
      }
      // Underline color by row band (rows 1-3, 4-6, 7-8).
      tr:nth-child(n + 1):nth-child(-n + 3) {
        td {text-decoration-color: $br-new-magenta;}
      }
      tr:nth-child(n + 4):nth-child(-n + 6) {
        // NOTE(review): $br-magenta here, not $br-new-magenta like its
        // siblings — confirm this is intentional and the variable exists.
        td {text-decoration-color: $br-magenta;}
      }
      tr:nth-child(n + 7):nth-child(-n + 8) {
        td {text-decoration-color: $b-dodger;}
      }
      td:nth-child(n + 2):nth-child(-n + 3) {
        font-weight: bold;
        text-decoration: underline;
        text-decoration-thickness: 2px;
        text-underline-offset: 5px;
      }
    }
    tr.current-row {
      outline-color: $br-new-magenta;
      &::after {
        content: "Current Row";
        background: $br-new-magenta;
      }
      // !important overrides the band colors set in the shared rule above.
      td {text-decoration-color: $b-dodger !important;}
    }
    .frame {
      outline-color: $br-new-purple;
      &::after {
        content: "Frame";
        background: $br-new-purple;
      }
    }
    .group {
      position: relative;
      outline-color: $b-sapphire;
      td:nth-child(2), td:nth-child(3) {
        font-weight: bold;
        text-decoration: underline;
        text-decoration-thickness: 2px;
        text-underline-offset: 5px;
        text-decoration-color: $b-sapphire;
      }
    }
  }

  // RANGE with an interval offset: label sits top-RIGHT (shadow mirrored),
  // and only the first column (the ordering column) is underlined.
  &.range-interval {
    .frame, tr.current-row {
      position: relative;
      outline-style: solid;
      outline-width: 3px;
      outline-offset: -5px;
      border-radius: 10px;
      td:first-child {
        font-weight: bold;
        text-decoration: underline;
        text-decoration-thickness: 2px;
        text-underline-offset: 5px;
        text-decoration-color: $br-new-purple;
      }
      &::after {
        display: block;
        padding: .25rem .5rem;
        position: absolute;
        top: 3px;
        right: 3px;
        border-radius: 4px;
        color: #fff;
        font-size: .8rem;
        font-weight: bold;
        text-transform: uppercase;
        letter-spacing: .02em;
        box-shadow: -4px 4px 4px $article-bg;
      }
    }
    tr.current-row {
      outline-color: $br-new-magenta;
      td:first-child {text-decoration-color: $br-new-magenta;}
      &::after {
        content: "Current Row";
        background: $br-new-magenta;
        // Shadow against the alternate row background, not the article bg.
        box-shadow: -4px 4px 4px $article-table-row-alt;
      }
    }
    .frame {
      outline-color: $br-new-purple;
      &::after {
        content: "Frame";
        background: $br-new-purple;
      }
    }
  }

  // Shared scaffolding for the RANGE-with-numeric-offset and ROWS examples.
  &.range-numeric, &.rows {
    .frame, tr.current-row {
      position: relative;
      outline-style: solid;
      outline-width: 3px;
      outline-offset: -5px;
      border-radius: 10px;
      &::after {
        display: block;
        padding: .25rem .5rem;
        position: absolute;
        top: 3px;
        left: 3px;
        border-radius: 4px;
        color: #fff;
        font-size: .8rem;
        font-weight: bold;
        text-transform: uppercase;
        letter-spacing: .02em;
        box-shadow: 4px 4px 4px $article-bg;
      }
    }
    tr.current-row {
      outline-color: $br-new-magenta;
      &::after {
        content: "Current Row";
        background: $br-new-magenta;
      }
    }
    .frame {
      outline-color: $br-new-purple;
      &::after {
        content: "Frame";
        background: $br-new-purple;
      }
    }
  }

  // RANGE-numeric only: underline the third column (the offset column),
  // switching its color on the current row.
  &.range-numeric {
    .frame {
      td:nth-child(3) {
        font-weight: bold;
        text-decoration: underline;
        text-decoration-thickness: 2px;
        text-underline-offset: 5px;
        text-decoration-color: $br-new-purple;
      }
      tr.current-row {
        td:nth-child(3) {text-decoration-color: $br-new-magenta;}
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// MEDIA QUERIES ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

View File

@ -303,6 +303,29 @@ Very useful for troubleshooting, but will log any sensitive data contained withi
Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED`
#### query-log-path
An absolute path to the query log file.
The default is `""` (queries aren't logged to a file).
Query logging supports SIGHUP-based log rotation.
The following is an example of a `logrotate` configuration:
```
/var/log/influxdb/queries.log {
rotate 5
daily
compress
missingok
notifempty
create 644 root root
postrotate
/bin/kill -HUP `pgrep -x influxd`
endscript
}
```
#### wal-fsync-delay
Default is `"0s"`.

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_cloud_dedicated:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_cloud_serverless:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -15,10 +15,16 @@ related:
---
Use Kubernetes to upgrade your InfluxDB Clustered version.
The upgrade is carried out using in-place updates, ensuring minimal downtime.
InfluxDB Clustered versioning is defined in the `AppInstance`
`CustomResourceDefinition` (CRD) in your
[`myinfluxdb.yml`](/influxdb3/clustered/install/set-up-cluster/configure-cluster/).
> [!Important]
> InfluxDB Clustered does not support downgrading.
> If you encounter an issue after upgrading,
> [contact InfluxData support](mailto:support@influxdata.com).
- [Version format](#version-format)
- [Upgrade your InfluxDB Clustered version](#upgrade-your-influxdb-clustered-version)

View File

@ -10,17 +10,16 @@ menu:
weight: 201
---
{{% note %}}
## Checkpoint releases {.checkpoint}
Some InfluxDB Clustered releases are checkpoint releases that introduce a
breaking change to an InfluxDB component.
When [upgrading InfluxDB Clustered](/influxdb3/clustered/admin/upgrade/),
**always upgrade to each checkpoint release first, before proceeding to newer versions**.
Checkpoint releases are only made when absolutely necessary and are clearly
identified below with the <span class="cf-icon Shield pink"></span> icon.
{{% /note %}}
> [!Note]
> ## Checkpoint releases {.checkpoint}
>
> Some InfluxDB Clustered releases are checkpoint releases that introduce a
> breaking change to an InfluxDB component.
> When [upgrading InfluxDB Clustered](/influxdb3/clustered/admin/upgrade/),
> **always upgrade to each checkpoint release first, before proceeding to newer versions**.
>
> Checkpoint releases are only made when absolutely necessary and are clearly
> identified below with the <span class="cf-icon Shield pink"></span> icon.
{{< release-toc >}}
@ -181,11 +180,10 @@ For customers who experience this bug when attempting to upgrade to
## 20240925-1257864 {date="2024-09-25" .checkpoint}
{{% warn %}}
This release has a number of bugs in it which make it unsuitable for customer use.
If you are currently running this version, please upgrade to
[20241024-1354148](#20241024-1354148).
{{% /warn %}}
> [!Caution]
> This release has a number of bugs in it which make it unsuitable for customer use.
> If you are currently running this version, please upgrade to
> [20241024-1354148](#20241024-1354148).
### Quickstart
@ -352,10 +350,9 @@ validation error when omitted.
When the `admin` section is omitted, the `admin-token` `Secret` can be used
instead to get started quickly.
{{% note %}}
We still highly recommend OAuth for production; however, this lets you run an
InfluxDB Cluster with out having to integrate with an identity provider.**
{{% /note %}}
> [!Note]
> We recommend OAuth for production; however, the `admin-token` lets you run an
> InfluxDB Cluster without having to integrate with an identity provider.
### Upgrade notes
@ -680,11 +677,10 @@ Kubernetes scheduler's default behavior. For further details, please consult the
- Fix gRPC reflection to only include services served by a particular listening
port.
{{% note %}}
`arrow.flight.protocol.FlightService` is known to be missing in the
`iox-shared-querier`'s reflection service even though `iox-shared-querier`
does run that gRPC service.
{{% /note %}}
> [!Note]
> `arrow.flight.protocol.FlightService` is known to be missing in the
> `iox-shared-querier`'s reflection service even though `iox-shared-querier`
> does run that gRPC service.
---
@ -889,10 +885,9 @@ spec:
### Highlights
{{% warn %}}
**This release fixes a regression in the database engine that was introduced in
[20231115-746129](#20231115-746129).**
{{% /warn %}}
> [!Important]
> **This release fixes a regression in the database engine that was introduced in
> [20231115-746129](#20231115-746129).**
### Changes

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_clustered:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -1,11 +1,15 @@
---
title: Python Plugins and Processing Engine
title: Processing engine and Python plugins
description: Use the Python processing engine to trigger and execute custom code on different events in an {{< product-name >}} instance.
menu:
influxdb3_core:
name: Processing Engine and Python Plugins
name: Processing engine and Python plugins
weight: 4
influxdb3/core/tags: []
related:
- /influxdb3/core/reference/cli/influxdb3/test/wal_plugin/
- /influxdb3/core/reference/cli/influxdb3/create/plugin/
- /influxdb3/core/reference/cli/influxdb3/create/trigger/
source: /shared/v3-core-plugins/_index.md
---

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_core:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -1,11 +1,15 @@
---
title: Python Plugins and Processing Engine
title: Processing engine and Python plugins
description: Use the Python processing engine to trigger and execute custom code on different events in an {{< product-name >}} instance.
menu:
influxdb3_enterprise:
name: Processing Engine and Python Plugins
name: Processing engine and Python plugins
weight: 4
influxdb3/enterprise/tags: []
influxdb3/core/tags: []
related:
- /influxdb3/enterprise/reference/cli/influxdb3/test/wal_plugin/
- /influxdb3/enterprise/reference/cli/influxdb3/create/plugin/
- /influxdb3/enterprise/reference/cli/influxdb3/create/trigger/
source: /shared/v3-core-plugins/_index.md
---

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_enterprise:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -582,6 +582,58 @@ FROM "h2o_feet"
GROUP BY "location"
```
### Window aggregate functions
Window functions let you calculate running totals, moving averages, or other
aggregate-like results without collapsing rows into groups
(unlike non-window aggregate functions).
Window aggregate functions include **all [aggregate functions](#aggregate-functions)**
and the [ranking functions](#ranking-functions).
The SQL `OVER` clause syntactically distinguishes a window
function from a non-window or aggregate function and defines how to group and
order rows for the window operation.
#### Examples:
{{% influxdb/custom-timestamps %}}
```sql
SELECT
time,
room,
temp,
avg(temp) OVER (PARTITION BY room) AS avg_room_temp
FROM
home
WHERE
time >= '2022-01-01T08:00:00Z'
AND time <= '2022-01-01T09:00:00Z'
ORDER BY
room,
time
```
| time | room | temp | avg_room_temp |
| :------------------ | :---------- | ---: | ------------: |
| 2022-01-01T08:00:00 | Kitchen | 21.0 | 22.0 |
| 2022-01-01T09:00:00 | Kitchen | 23.0 | 22.0 |
| 2022-01-01T08:00:00 | Living Room | 21.1 | 21.25 |
| 2022-01-01T09:00:00 | Living Room | 21.4 | 21.25 |
{{% /influxdb/custom-timestamps %}}
#### Ranking Functions
| Function | Description |
| :------- | :--------------------------------------------------------- |
| CUME_DIST() | Returns the cumulative distribution of a value within a group of values |
| DENSE_RANK() | Returns a rank for each row without gaps in the numbering |
| NTILE() | Distributes the rows in an ordered partition into the specified number of groups |
| PERCENT_RANK() | Returns the percentage rank of the current row within its partition |
| RANK() | Returns the rank of the current row in its partition, allowing gaps between ranks |
| ROW_NUMBER() | Returns the position of the current row in its partition |
### Selector functions
Selector functions are unique to InfluxDB. They behave like aggregate functions in that they take a row of data and compute it down to a single value. However, selectors are unique in that they return a **time value** in addition to the computed value. In short, selectors return an aggregated value along with a timestamp.

File diff suppressed because it is too large Load Diff

View File

@ -135,8 +135,12 @@ source ~/.zshrc
To start your InfluxDB instance, use the `influxdb3 serve` command
and provide the following:
- `--object-store`: Specifies the type of Object store to use. InfluxDB supports the following: local file system (`file`), `memory`, S3 (and compatible services like Ceph or Minio) (`s3`), Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
- `--node-id`: A string identifier that determines the server's storage path within the configured storage location
- `--object-store`: Specifies the type of Object store to use.
InfluxDB supports the following: local file system (`file`), `memory`,
S3 (and compatible services like Ceph or Minio) (`s3`),
Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
- `--node-id`: A string identifier that determines the server's storage path
within the configured storage location, and, in a multi-node setup, is used to reference the node.
The following examples show how to start InfluxDB 3 with different object store configurations:
@ -216,7 +220,7 @@ InfluxDB is a schema-on-write database. You can start writing data and InfluxDB
After a schema is created, InfluxDB validates future write requests against it before accepting the data.
Subsequent requests can add new fields on-the-fly, but can't add new tags.
InfluxDB 3 Core is optimized for recent data, but accepts writes from any time period. It persists that data in Parquet files for access by third-party systems for longer term historical analysis and queries. If you require longer historical queries with a compactor that optimizes data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/).
{{% product-name %}} is optimized for recent data, but accepts writes from any time period. It persists that data in Parquet files for access by third-party systems for longer term historical analysis and queries. If you require longer historical queries with a compactor that optimizes data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/).
The database has three write API endpoints that respond to HTTP `POST` requests:
@ -278,7 +282,7 @@ With `accept_partial=true`:
```
Line `1` is written and queryable.
The response is an HTTP error (`400`) status, and the response body contains `partial write of line protocol occurred` and details about the problem line.
The response is an HTTP error (`400`) status, and the response body contains the error message `partial write of line protocol occurred` with details about the problem line.
##### Parsing failed for write_lp endpoint
@ -323,7 +327,7 @@ For more information, see [diskless architecture](#diskless-architecture).
> Because InfluxDB sends a write response after the WAL file has been flushed to the configured object store (default is every second), individual write requests might not complete quickly, but you can make many concurrent requests to achieve higher total throughput.
> Future enhancements will include an API parameter that lets requests return without waiting for the WAL flush.
#### Create a database or Table
#### Create a database or table
To create a database without writing data, use the `create` subcommand--for example:
@ -340,9 +344,10 @@ influxdb3 create -h
### Query the database
InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an
SQL-like language customized for time series queries. {{< product-name >}} limits
query time ranges to 72 hours (both recent and historical) to ensure query performance.
SQL-like language customized for time series queries.
{{< product-name >}} limits
query time ranges to 72 hours (both recent and historical) to ensure query performance.
For more information about the 72-hour limitation, see the
[update on InfluxDB 3 Core's 72-hour limitation](https://www.influxdata.com/blog/influxdb3-open-source-public-alpha-jan-27/).
@ -400,7 +405,7 @@ $ influxdb3 query --database=servers "SELECT DISTINCT usage_percent, time FROM c
### Querying using the CLI for InfluxQL
[InfluxQL](/influxdb3/core/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. Its compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations.
[InfluxQL](/influxdb3/version/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. It's compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations.
To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example:
@ -499,7 +504,7 @@ You can use the `influxdb3` CLI to create a last value cache.
Usage: $ influxdb3 create last_cache [OPTIONS] -d <DATABASE_NAME> -t <TABLE> [CACHE_NAME]
Options:
-h, --host <HOST_URL> URL of the running InfluxDB 3 Core server [env: INFLUXDB3_HOST_URL=]
-h, --host <HOST_URL> URL of the running {{% product-name %}} server [env: INFLUXDB3_HOST_URL=]
-d, --database <DATABASE_NAME> The database to run the query against [env: INFLUXDB3_DATABASE_NAME=]
--token <AUTH_TOKEN> The token for authentication [env: INFLUXDB3_AUTH_TOKEN=]
-t, --table <TABLE> The table for which the cache is created
@ -569,35 +574,26 @@ influxdb3 create distinct_cache -h
The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data.
To use the Processing engine, you create [plugins](#plugin) and [triggers](#trigger).
To activate the Processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run.
#### Plugin
A plugin is a Python function that has a signature compatible with one of the [trigger types](#trigger-types).
The [`influxdb3 create plugin`](/influxdb3/core/reference/cli/influxdb3/create/plugin/) command loads a Python plugin file into the server.
A plugin is a Python function that has a signature compatible with a Processing engine [trigger](#trigger).
#### Trigger
After you load a plugin into an InfluxDB 3 server, you can create one or more
triggers associated with the plugin.
When you create a trigger, you specify a plugin, a database, optional runtime arguments,
and a trigger-spec, which specifies `all_tables` or `table:my_table_name` (for filtering data sent to the plugin).
When you _enable_ a trigger, the server executes the plugin code according to the
plugin signature.
When you create a trigger, you specify a [plugin](#plugin), a database, optional arguments,
and a _trigger-spec_, which defines when the plugin is executed and what data it receives.
##### Trigger types
InfluxDB 3 provides the following types of triggers:
InfluxDB 3 provides the following types of triggers, each with specific trigger-specs:
- **On WAL flush**: Sends the batch of write data to a plugin once a second (configurable).
> [!Note]
> Currently, only the **WAL flush** trigger is supported, but more are on the way:
>
> - **On Snapshot**: Sends metadata to a plugin for further processing against the Parquet data or to send the information elsewhere (for example, to an Iceberg Catalog). _Not yet available._
> - **On Schedule**: Executes a plugin on a user-configured schedule, useful for data collection and deadman monitoring. _Not yet available._
> - **On Request**: Binds a plugin to an HTTP endpoint at `/api/v3/plugins/<name>`. _Not yet available._
> The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services.
- **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second).
- **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring.
- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/<ENDPOINT>`.
The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services.
### Test, create, and trigger plugin code
@ -686,7 +682,7 @@ Test your InfluxDB 3 plugin safely without affecting written data. During a plug
To test a plugin, do the following:
1. Create a _plugin directory_--for example, `/path/to/.influxdb/plugins`
2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir` option with your plugin directory path.
2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir <PATH>` option.
3. Save the [preceding example code](#example-python-plugin) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries.
4. To run the test, enter the following command with the following options:
@ -706,7 +702,7 @@ You can quickly see how the plugin behaves, what data it would have written to t
You can then edit your Python code in the plugins directory, and rerun the test.
The server reloads the file for every request to the `test` API.
For more information, see [`influxdb3 test wal_plugin`](/influxdb3/core/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`.
For more information, see [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`.
With the plugin code inside the server plugin directory, and a successful test,
you're ready to create a plugin and a trigger to run on the server.
@ -729,14 +725,6 @@ influxdb3 test wal_plugin \
test.py
```
```bash
# Create a plugin to run
influxdb3 create plugin \
-d mydb \
--code-filename="/path/to/.influxdb3/plugins/test.py" \
test_plugin
```
```bash
# Create a trigger that runs the plugin
influxdb3 create trigger \
@ -754,11 +742,7 @@ enable the trigger and have it run the plugin as you write data:
influxdb3 enable trigger --database mydb trigger1
```
For more information, see the following:
- [`influxdb3 test wal_plugin`](/influxdb3/core/reference/cli/influxdb3/test/wal_plugin/)
- [`influxdb3 create plugin`](/influxdb3/core/reference/cli/influxdb3/create/plugin/)
- [`influxdb3 create trigger`](/influxdb3/core/reference/cli/influxdb3/create/trigger/)
For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/).
### Diskless architecture

View File

@ -5,7 +5,12 @@ for different database events.
{{% product-name %}} provides the InfluxDB 3 Processing engine, an embedded Python VM that can dynamically load and trigger Python plugins
in response to events in your database.
## Plugins
## Key Concepts
### Plugins
A Processing engine _plugin_ is Python code you provide to run tasks, such as
downsampling data, monitoring, creating alerts, or calling external services.
> [!Note]
> #### Contribute and use community plugins
@ -14,28 +19,30 @@ in response to events in your database.
> and contribute example plugins.
> You can reference plugins from the repository directly within a trigger configuration.
A Processing engine _plugin_ is Python code you provide to run tasks, such as
downsampling data, monitoring, creating alerts, or calling external services.
## Triggers
### Triggers
A _trigger_ is an InfluxDB 3 resource you create to associate a database
event (for example, a WAL flush) with the plugin that should run.
When an event occurs, the trigger passes configuration, optional arguments, and event data to the plugin.
When an event occurs, the trigger passes configuration details, optional arguments, and event data to the plugin.
The Processing engine provides four types of plugins and triggers--each type corresponds to an event type with event-specific configuration to let you handle events with targeted logic.
The Processing engine provides four types of triggers--each type corresponds to
an event type with event-specific configuration to let you handle events with targeted logic.
- **WAL flush**: Triggered when the write-ahead log (WAL) is flushed to the object store (default is every second)
- **Parquet persistence (coming soon)**: Triggered when InfluxDB 3 persists data to object store Parquet files
- **Scheduled tasks**: Triggered on a schedule you specify using cron syntax
- **On Request**: Bound to the HTTP API `/api/v3/engine/<CUSTOM_PATH>` endpoint and triggered by a GET or POST request to the endpoint.
- **WAL Flush**: Triggered when the write-ahead log (WAL) is flushed to the object store (default is every second).
- **Scheduled Tasks**: Triggered on a schedule you specify using cron syntax.
- **On-request**: Triggered on a GET or POST request to the bound HTTP API endpoint at `/api/v3/engine/<CUSTOM_PATH>`.
<!--
- **Parquet Persistence (coming soon)**: Triggered when InfluxDB 3 persists data to object storage Parquet files.
-->
## Activate the Processing engine
### Activate the Processing engine
To enable the Processing engine, start the {{% product-name %}} server with the `--plugin-dir` option and a path to your plugins directory (it doesn't need to exist yet)--for example:
To enable the Processing engine, start the {{% product-name %}} server with the
`--plugin-dir` option and a path to your plugins directory.
If the directory doesn't exist, the server creates it.
```bash
influxdb3 serve --node-id node0 --plugin-dir /path/to/plugins
influxdb3 serve --node-id node0 --object-store [OBJECT STORE TYPE] --plugin-dir /path/to/plugins
```
## Shared API
@ -47,7 +54,7 @@ The shared API provides access to the following:
- `query` to query data from any database
- `info`, `warn`, and `error` to log messages to the database log, which is output in the server logs and captured in system tables queryable by SQL
### Line builder
### LineBuilder
The `LineBuilder` is a simple API for building lines of Line Protocol to write into the database. Writes are buffered while the plugin runs and are flushed when the plugin completes. The `LineBuilder` API is available in all plugin types.
@ -188,22 +195,22 @@ The shared API `query` function executes an SQL query with optional parameters (
The following examples show how to use the `query` function:
```python
influxdb3_local.query("SELECT * from foo where bar = 'baz' and time > now() - 'interval 1 hour'")
influxdb3_local.query("SELECT * from foo where bar = 'baz' and time > now() - INTERVAL '1 hour'")
# Or using parameterized queries
args = {"bar": "baz"}
influxdb3_local.query("SELECT * from foo where bar = $bar and time > now() - 'interval 1 hour'", args)
influxdb3_local.query("SELECT * from foo where bar = $bar and time > now() - INTERVAL '1 hour'", args)
```
### Logging
The shared API `info`, `warn`, and `error` functions log messages to the database log, which is output in the server logs and captured in system tables queryable by SQL.
The `info`, `warn`, and `error` functions are available in all plugin types. The functions take an arbitrary number of arguments, convert them to strings, and then join them into a single message separated by a space.
The `info`, `warn`, and `error` functions are available in all plugin types. Each function accepts multiple arguments, converts them to strings, and logs them as a single, space-separated message.
The following examples show to use the `info`, `warn`, and `error` logging functions:
The following examples show how to use the `info`, `warn`, and `error` logging functions:
```python
ifluxdb3_local.info("This is an info message")
influxdb3_local.info("This is an info message")
influxdb3_local.warn("This is a warning message")
influxdb3_local.error("This is an error message")
@ -214,15 +221,31 @@ influxdb3_local.info("This is an info message with an object", obj_to_log)
### Trigger arguments
Every plugin type can receive arguments from the configuration of the trigger that runs it.
You can use this to provide runtime configuration and drive behavior of a plugin--for example:
A plugin can receive arguments from the trigger that runs it.
You can use this to provide runtime configuration and drive behavior of a plugin--for example:
- threshold values for monitoring
- connection properties for connecting to third-party services
The arguments are passed as a `Dict[str, str]` where the key is the argument name and the value is the argument value.
To pass arguments to a plugin, specify trigger arguments in a comma-separated list
of key-value pairs--for example, using the CLI:
The following example shows how to use an argument in a WAL plugin:
```bash
influxdb3 create trigger
--trigger-arguments key1=val1,key2=val2
```
The arguments are passed to the plugin as a `Dict[str, str]` where the key is
the argument name and the value is the argument value--for example:
```python
args = {
"key1": "value1",
"key2": "value2",
}
```
The following example shows how to access and use an argument in a WAL plugin:
```python
def process_writes(influxdb3_local, table_batches, args=None):
@ -233,7 +256,7 @@ def process_writes(influxdb3_local, table_batches, args=None):
influxdb3_local.warn("No threshold provided")
```
The `args` parameter is optional and can be omitted from the trigger definition if the plugin doesn't need to use arguments.
The `args` parameter is optional. If a plugin doesn't require arguments, you can omit it from the trigger definition.
## Import plugin dependencies
@ -267,14 +290,17 @@ influxdb3 install package <PACKAGE_NAME>
```
The result is an active Python virtual environment with the package installed in `<PLUGINS_DIR>/.venv`.
You can pass additional options to use a `requirements.txt` file or a custom virtual environment path.
You can specify additional options to install dependencies from a `requirements.txt` file or a custom virtual environment path.
For more information, see the `influxdb3` CLI help:
```bash
influxdb3 install package --help
```
## WAL flush plugin
## Configure plugin triggers
Triggers define when and how plugins execute in response to database events. Each trigger type corresponds to a specific event, allowing precise control over automation within {{% product-name %}}.
### WAL flush trigger
When a WAL flush plugin is triggered, the plugin receives a list of `table_batches` filtered by the trigger configuration (either _all tables_ in the database or a specific table).
@ -302,7 +328,7 @@ def process_writes(influxdb3_local, table_batches, args=None):
influxdb3_local.info("wal_plugin.py done")
```
### WAL flush trigger Configuration
#### WAL flush trigger configuration
When you create a trigger, you associate it with a database and provide configuration specific to the trigger type.
@ -330,9 +356,9 @@ For more information about trigger arguments, see the CLI help:
influxdb3 create trigger help
```
## Schedule Plugin
### Schedule trigger
Schedule plugins run on a schedule specified in cron syntax. The plugin will receive the local API, the time of the trigger, and any arguments passed in the trigger definition. Here's an example of a simple schedule plugin:
Schedule plugins run on a schedule specified in cron syntax. The plugin receives the local API, the time of the trigger, and any arguments passed in the trigger definition. Here's an example of a simple schedule plugin:
```python
# see if a table has been written to in the last 5 minutes
@ -347,20 +373,23 @@ def process_scheduled_call(influxdb3_local, time, args=None):
influxdb3_local.error("No table_name provided for schedule plugin")
```
### Schedule Trigger Configuration
#### Schedule trigger configuration
Schedule plugins are set with a `trigger-spec` of `schedule:<cron_expression>` or `every:<duration>`. The `args` parameter can be used to pass configuration to the plugin. For example, if we wanted to use the system-metrics example from the GitHub repo and have it collect every 10 seconds we could use the following trigger definition:
```shell
```bash
influxdb3 create trigger \
--trigger-spec "every:10s" \
--plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \
--database mydb system-metrics
```
## On Request Plugin
### On Request trigger
On Request plugins are triggered by a request to a specific endpoint under `/api/v3/engine`. The plugin will receive the local API, query parameters `Dict[str, str]`, request headers `Dict[str, str]`, request body (as bytes), and any arguments passed in the trigger definition. Here's an example of a simple On Request plugin:
On Request plugins are triggered by a request to a custom HTTP API endpoint.
The plugin receives the shared API, query parameters `Dict[str, str]`, request headers `Dict[str, str]`, the request body (as bytes), and any arguments passed in the trigger definition.
#### Example: On Request plugin
```python
import json
@ -385,15 +414,21 @@ def process_request(influxdb3_local, query_parameters, request_headers, request_
return 200, {"Content-Type": "application/json"}, json.dumps({"status": "ok", "line": line_str})
```
### On Request Trigger Configuration
#### On Request trigger configuration
On Request plugins are set with a `trigger-spec` of `request:<endpoint>`. The `args` parameter can be used to pass configuration to the plugin. For example, if we wanted the above plugin to run on the endpoint `/api/v3/engine/my_plugin`, we would use `request:my_plugin` as the `trigger-spec`.
To create a trigger for an On Request plugin, specify the `request:<ENDPOINT>` trigger-spec.
Trigger specs must be unique across all configured plugins, regardless of which database they are tied to, given the path is the same. Here's an example to create a request trigger tied to the "hello-world" path using a plugin in the plugin-dir:
For example, the following command creates an HTTP API `/api/v3/engine/my-plugin` endpoint for the plugin file:
```shell
```bash
influxdb3 create trigger \
--trigger-spec "request:hello-world" \
  --plugin-filename "hello/hello_world.py" \
--database mydb hello-world
--trigger-spec "request:my-plugin" \
--plugin-filename "examples/my-on-request.py" \
--database mydb my-plugin
```
To run the plugin, you send an HTTP request to `<HOST>/api/v3/engine/my-plugin`.
Because all On Request plugins for a server share the same `<host>/api/v3/engine/` base URL,
the trigger-spec you define must be unique across all plugins configured for a server,
regardless of which database they are associated with.

View File

@ -126,8 +126,12 @@ source ~/.zshrc
To start your InfluxDB instance, use the `influxdb3 serve` command
and provide the following:
- `--object-store`: Specifies the type of Object store to use. InfluxDB supports the following: local file system (`file`), `memory`, S3 (and compatible services like Ceph or Minio) (`s3`), Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
- `--node-id`: A string identifier that determines the server's storage path within the configured storage location, and, in a multi-node setup, is used to reference the node
- `--object-store`: Specifies the type of Object store to use.
InfluxDB supports the following: local file system (`file`), `memory`,
S3 (and compatible services like Ceph or Minio) (`s3`),
Google Cloud Storage (`google`), and Azure Blob Storage (`azure`).
- `--node-id`: A string identifier that determines the server's storage path
within the configured storage location, and, in a multi-node setup, is used to reference the node.
The following examples show how to start InfluxDB 3 with different object store configurations:
@ -273,7 +277,7 @@ With `accept_partial=true`:
```
Line `1` is written and queryable.
The response is an HTTP error (`400`) status, and the response body contains `partial write of line protocol occurred` and details about the problem line.
The response is an HTTP error (`400`) status, and the response body contains the error message `partial write of line protocol occurred` with details about the problem line.
##### Parsing failed for write_lp endpoint
@ -390,7 +394,7 @@ $ influxdb3 query --database=servers "SELECT DISTINCT usage_percent, time FROM c
### Querying using the CLI for InfluxQL
[InfluxQL](/influxdb3/enterprise/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. It's compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations.
[InfluxQL](/influxdb3/version/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. It's compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations.
To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example:
@ -489,7 +493,7 @@ You can use the `influxdb3` CLI to create a last value cache.
Usage: $ influxdb3 create last_cache [OPTIONS] -d <DATABASE_NAME> -t <TABLE> [CACHE_NAME]
Options:
-h, --host <HOST_URL> URL of the running InfluxDB 3 Enterprise server [env: INFLUXDB3_HOST_URL=]
-h, --host <HOST_URL> URL of the running {{% product-name %}} server [env: INFLUXDB3_HOST_URL=]
-d, --database <DATABASE_NAME> The database to run the query against [env: INFLUXDB3_DATABASE_NAME=]
--token <AUTH_TOKEN> The token for authentication [env: INFLUXDB3_AUTH_TOKEN=]
-t, --table <TABLE> The table for which the cache is created
@ -559,35 +563,26 @@ influxdb3 create distinct_cache -h
The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data.
To use the Processing engine, you create [plugins](#plugin) and [triggers](#trigger).
To activate the Processing engine, pass the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run.
#### Plugin
A plugin is a Python function that has a signature compatible with one of the [trigger types](#trigger-types).
The [`influxdb3 create plugin`](/influxdb3/enterprise/reference/cli/influxdb3/create/plugin/) command loads a Python plugin file into the server.
A plugin is a Python function that has a signature compatible with a Processing engine [trigger](#trigger).
#### Trigger
After you load a plugin into an InfluxDB 3 server, you can create one or more
triggers associated with the plugin.
When you create a trigger, you specify a plugin, a database, optional runtime arguments,
and a trigger-spec, which specifies `all_tables` or `table:my_table_name` (for filtering data sent to the plugin).
When you _enable_ a trigger, the server executes the plugin code according to the
plugin signature.
When you create a trigger, you specify a [plugin](#plugin), a database, optional arguments,
and a _trigger-spec_, which defines when the plugin is executed and what data it receives.
##### Trigger types
InfluxDB 3 provides the following types of triggers:
InfluxDB 3 provides the following types of triggers, each with specific trigger-specs:
- **On WAL flush**: Sends the batch of write data to a plugin once a second (configurable).
> [!Note]
> Currently, only the **WAL flush** trigger is supported, but more are on the way:
>
> - **On Snapshot**: Sends metadata to a plugin for further processing against the Parquet data or to send the information elsewhere (for example, to an Iceberg Catalog). _Not yet available._
> - **On Schedule**: Executes a plugin on a user-configured schedule, useful for data collection and deadman monitoring. _Not yet available._
> - **On Request**: Binds a plugin to an HTTP endpoint at `/api/v3/plugins/<name>`. _Not yet available._
> The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services.
- **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second).
- **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring.
- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/<ENDPOINT>`.
The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services.
### Test, create, and trigger plugin code
@ -676,7 +671,7 @@ Test your InfluxDB 3 plugin safely without affecting written data. During a plug
To test a plugin, do the following:
1. Create a _plugin directory_--for example, `/path/to/.influxdb/plugins`
2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir` option with your plugin directory path.
2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir <PATH>` option.
3. Save the [preceding example code](#example-python-plugin) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries.
4. To run the test, enter the following command with the following options:
@ -696,7 +691,7 @@ You can quickly see how the plugin behaves, what data it would have written to t
You can then edit your Python code in the plugins directory, and rerun the test.
The server reloads the file for every request to the `test` API.
For more information, see [`influxdb3 test wal_plugin`](/influxdb3/enterprise/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`.
For more information, see [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`.
With the plugin code inside the server plugin directory, and a successful test,
you're ready to create a plugin and a trigger to run on the server.
@ -719,14 +714,6 @@ influxdb3 test wal_plugin \
test.py
```
```bash
# Create a plugin to run
influxdb3 create plugin \
-d mydb \
--code-filename="/path/to/.influxdb3/plugins/test.py" \
test_plugin
```
```bash
# Create a trigger that runs the plugin
influxdb3 create trigger \
@ -744,15 +731,12 @@ enable the trigger and have it run the plugin as you write data:
influxdb3 enable trigger --database mydb trigger1
```
For more information, see the following:
- [`influxdb3 test wal_plugin`](/influxdb3/enterprise/reference/cli/influxdb3/test/wal_plugin/)
- [`influxdb3 create plugin`](/influxdb3/enterprise/reference/cli/influxdb3/create/plugin/)
- [`influxdb3 create trigger`](/influxdb3/enterprise/reference/cli/influxdb3/create/trigger/)
For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/).
### Diskless architecture
InfluxDB 3 is able to operate using only object storage with no locally attached disk. While it can use only a disk with no dependencies, the ability to operate without one is a new capability with this release. The figure below illustrates the write path for data landing in the database.
InfluxDB 3 is able to operate using only object storage with no locally attached disk.
While it can use only a disk with no dependencies, the ability to operate without one is a new capability with this release. The figure below illustrates the write path for data landing in the database.
{{< img-hd src="/img/influxdb/influxdb-3-write-path.png" alt="Write Path for InfluxDB 3 Core & Enterprise" />}}

View File

@ -0,0 +1,8 @@
{{- /*
  Renders a link to the product-appropriate "home sensor" sample data anchor.
  Distributed products (cloud-dedicated, cloud-serverless, clustered) use the
  "get-started-home-sensor-data" anchor; all other products use the
  "home-sensor-data" anchor.
*/ -}}
{{- /* Extract the product slug from the page path, e.g. /influxdb3/<product>/... */ -}}
{{- $productPathData := split .Page.RelPermalink "/" -}}
{{- $product := index $productPathData 2 -}}
{{- $isDistributed := in (slice "cloud-dedicated" "cloud-serverless" "clustered") $product -}}
{{- if $isDistributed -}}
<a href="/influxdb3/{{ $product }}/reference/sample-data/#get-started-home-sensor-data">Get started home sensor sample data</a>
{{- else -}}
<a href="/influxdb3/{{ $product }}/reference/sample-data/#home-sensor-data">Home sensor sample data</a>
{{- end -}}

View File

@ -0,0 +1,335 @@
{{ $unit := .Get 0 | default "groups" }}
{{ if eq $unit "groups" }}
<table class="window-frame-units groups">
<thead>
<tr>
<th style="text-align: left">time</th>
<th style="text-align: left">country</th>
<th style="text-align: left">city</th>
<th style="text-align: right">wind_direction</th>
</tr>
</thead>
<tbody class="group">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">181</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">228</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">289</td>
</tr>
</tbody>
<tbody class="group">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">24</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">210</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">206</td>
</tr>
</tbody>
<tbody class="group">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bari</td>
<td style="text-align: right">2</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bari</td>
<td style="text-align: right">57</td>
</tr>
</tbody>
<tbody class="group">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">351</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">232</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">29</td>
</tr>
</tbody>
</table>
{{ else if eq $unit "groups with frame" }}
<table class="window-frame-units groups-with-frame">
<thead>
<tr>
<th style="text-align: left">time</th>
<th style="text-align: left">country</th>
<th style="text-align: left">city</th>
<th style="text-align: right">wind_direction</th>
</tr>
</thead>
<tbody class="frame">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">181</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">228</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Strasbourg</td>
<td style="text-align: right">289</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">24</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">210</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">France</td>
<td style="text-align: left">Toulouse</td>
<td style="text-align: right">206</td>
</tr>
<tr class="current-row">
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bari</td>
<td style="text-align: right">2</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bari</td>
<td style="text-align: right">57</td>
</tr>
</tbody>
<tbody class="group">
<tr>
<td style="text-align: left">2025-02-17T00:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">351</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T01:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">232</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T02:00:00</td>
<td style="text-align: left">Italy</td>
<td style="text-align: left">Bologna</td>
<td style="text-align: right">29</td>
</tr>
</tbody>
</table>
{{ else if (eq $unit "range interval") }}
<table class="window-frame-units range-interval">
<thead>
<tr>
<th style="text-align: left">time</th>
<th style="text-align: left">room</th>
<th style="text-align: right">temp</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left">2022-01-01T08:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">21.0</td>
</tr>
<tr>
<td style="text-align: left">2022-01-01T09:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">23.0</td>
</tr>
</tbody>
<tbody class="frame">
<tr>
<td style="text-align: left">2022-01-01T10:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.7</td>
</tr>
<tr>
<td style="text-align: left">2022-01-01T11:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.4</td>
</tr>
<tr>
<td style="text-align: left">2022-01-01T12:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.5</td>
</tr>
<tr class="current-row">
<td style="text-align: left">2022-01-01T13:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.8</td>
</tr>
<tr>
<td style="text-align: left">2022-01-01T14:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.8</td>
</tr>
</tbody>
<tbody>
<tr>
<td style="text-align: left">2022-01-01T15:00:00</td>
<td style="text-align: left">Kitchen</td>
<td style="text-align: right">22.7</td>
</tr>
</tbody>
</table>
{{ else if (eq $unit "range numeric") }}
<table class="window-frame-units range-numeric">
<thead>
<tr>
<th style="text-align: left">time</th>
<th style="text-align: left">city</th>
<th style="text-align: right">wind_direction</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left">2025-02-17T13:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">33</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T08:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">34</td>
</tr>
</tbody>
<tbody class="frame">
<tr>
<td style="text-align: left">2025-02-17T23:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">49</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T17:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">86</td>
</tr>
<tr class="current-row">
<td style="text-align: left">2025-02-17T11:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">93</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T12:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">115</td>
</tr>
</tbody>
<tbody>
<tr>
<td style="text-align: left">2025-02-17T10:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">156</td>
</tr>
</tbody>
</table>
{{ else if (eq $unit "rows") }}
<table class="window-frame-units rows">
<thead>
<tr>
<th style="text-align: left">time</th>
<th style="text-align: left">city</th>
<th style="text-align: right">wind_direction</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left">2025-02-17T08:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">34</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T10:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">156</td>
</tr>
</tbody>
<tbody class="frame">
<tr>
<td style="text-align: left">2025-02-17T11:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">93</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T12:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">115</td>
</tr>
<tr class="current-row">
<td style="text-align: left">2025-02-17T13:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">33</td>
</tr>
<tr>
<td style="text-align: left">2025-02-17T17:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">86</td>
</tr>
</tbody>
<tbody>
<tr>
<td style="text-align: left">2025-02-17T23:00:00</td>
<td style="text-align: left">Rome</td>
<td style="text-align: right">49</td>
</tr>
</tbody>
</table>
{{ end }}