chore(ci): Improve CI lint and test runners

- Reconfigures Prettier linting.
- Adds .editorconfig to help with consistent editor settings.
- Refactors test runs:
  - Removes test configuration from compose.yaml (not suited for this use case).
  - Splits the test runner into test content setup and pytest stages that can be
    run separately or together (and with other test runners in the future).
  - Configuration lives in the Dockerfiles and in the lint-staged command line
    (`.lintstagedrc.mjs`).
- Updates CONTRIBUTING.md.
- Updates client library write examples in cloud-dedicated and clustered.
Branch: pull/5503/head
Jason Stirnaman, 2024-06-21 18:41:07 -05:00
Parent: 37dd3eaa8d
Commit: 5c74f013a1
30 changed files with 2945 additions and 2261 deletions

.editorconfig (new file)

@@ -0,0 +1,6 @@
charset = utf-8
insert_final_newline = true
end_of_line = lf
indent_style = space
indent_size = 2
max_line_length = 80

.gitignore

@@ -3,6 +3,8 @@
 public
 .*.swp
 node_modules
+.config*
+**/.env*
 *.log
 /resources
 .hugo_build.lock
@@ -10,4 +12,6 @@ node_modules
 /api-docs/redoc-static.html*
 .vscode/*
 .idea
+config.toml
 package-lock.json
+tmp

Git pre-commit hook (.husky/pre-commit)

@@ -1,2 +1 @@
-npx lint-staged --relative --verbose
-yarn run test
+npx lint-staged --relative
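
The hook body above is the entire pre-commit check, so you can exercise it without committing. A sketch, assuming a staged Markdown file (the path here is only an example):

```sh
# Stage a file, then run the same command the hook runs.
git add content/influxdb/cloud-dedicated/get-started/_index.md
npx lint-staged --relative
```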

.lintstagedrc.mjs (new file)

@@ -0,0 +1,44 @@
// Lint-staged configuration. This file must export a lint-staged configuration object.
function lintStagedContent(paths, productPath) {
  const name = `staged-${productPath.replace(/\//g, '-')}`;
  return [
    `prettier --write ${paths.join(' ')}`,
    `docker build . -f Dockerfile.tests -t influxdata-docs/tests:latest`,
    // Remove any existing test container.
    `docker rm -f ${name} || true`,
    `docker run --name ${name} --mount type=volume,target=/app/content --mount type=bind,src=./content,dst=/src/content --mount type=bind,src=./static/downloads,dst=/app/data influxdata-docs/tests --files "${paths.join(' ')}"`,
    `docker build . -f Dockerfile.pytest -t influxdata-docs/pytest:latest`,
    // Run test runners. If tests fail, the container will be removed,
    // but the "test-" container will remain until the next run.
    `docker run --env-file ${productPath}/.env.test --volumes-from ${name} --rm influxdata-docs/pytest --codeblocks ${productPath}/`,
  ];
}

export default {
  '*.{js,css}': (paths) => `prettier --write ${paths.join(' ')}`,
  // Don't let prettier check or write Markdown files for now;
  // it indents code blocks within list items, which breaks Hugo's rendering.
  // "*.md": paths => `prettier --check ${paths.join(' ')}`,
  'content/influxdb/cloud-dedicated/**/*.md': (paths) =>
    lintStagedContent(paths, 'content/influxdb/cloud-dedicated'),
  'content/influxdb/clustered/**/*.md': (paths) =>
    lintStagedContent(paths, 'content/influxdb/clustered'),
  // "content/influxdb/cloud-serverless/**/*.md": "docker compose run -T lint --config=content/influxdb/cloud-serverless/.vale.ini --minAlertLevel=error",
  // "content/influxdb/clustered/**/*.md": "docker compose run -T lint --config=content/influxdb/clustered/.vale.ini --minAlertLevel=error",
  // "content/influxdb/{cloud,v2,telegraf}/**/*.md": "docker compose run -T lint --config=.vale.ini --minAlertLevel=error"
};
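
Because each stage in `lintStagedContent` is a plain `docker` command, the same pipeline can be run by hand outside of lint-staged. A sketch derived directly from the config above, using the clustered product path:

```sh
# Build the content-setup and pytest images with the same tags as the config.
docker build . -f Dockerfile.tests -t influxdata-docs/tests:latest
docker build . -f Dockerfile.pytest -t influxdata-docs/pytest:latest

# Stage content into the shared volume, then run pytest against its code blocks.
docker run --name staged-content-influxdb-clustered \
  --mount type=volume,target=/app/content \
  --mount type=bind,src=./content,dst=/src/content \
  --mount type=bind,src=./static/downloads,dst=/app/data \
  influxdata-docs/tests --files "content/influxdb/clustered/**/*.md"

docker run --env-file content/influxdb/clustered/.env.test \
  --volumes-from staged-content-influxdb-clustered --rm \
  influxdata-docs/pytest --codeblocks content/influxdb/clustered/
```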

.prettierignore (new file)

@@ -0,0 +1,5 @@
# Ignore Prettier checking for files
**/.git
**/.svn
**/.hg
**/node_modules

.prettierrc.yaml

@@ -1,4 +1,14 @@
-trailingComma: "es5"
-tabWidth: 2
+# ~/.prettierrc.yaml
+printWidth: 80
 semi: true
 singleQuote: true
+tabWidth: 2
+trailingComma: "es5"
+useTabs: false
+overrides:
+  - files:
+      - "*.md"
+      - "*.markdown"
+    options:
+      proseWrap: "preserve"
+# Prettier also uses settings, such as indent, specified in .editorconfig
CONTRIBUTING.md: diff suppressed (file too large).
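
With the Prettier config and `.prettierignore` above in place, a one-off format pass outside the hook looks like the following sketch (the globs are examples, not part of this commit):

```sh
# Check formatting without writing changes, then format a specific file.
npx prettier --check "**/*.{js,css}"
npx prettier --write assets/js/*.js
```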

Dockerfile.pytest (new file)

@@ -0,0 +1,54 @@
FROM golang:latest
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
curl \
git \
gpg \
jq \
maven \
nodejs \
npm \
python3 \
python3-pip \
python3-venv \
wget
RUN ln -s /usr/bin/python3 /usr/bin/python
# Create a virtual environment for Python to avoid conflicts with the system Python and having to use the --break-system-packages flag when installing packages with pip.
RUN python -m venv /opt/venv
# Enable venv
ENV PATH="/opt/venv/bin:$PATH"
# Prevents Python from writing pyc files.
ENV PYTHONDONTWRITEBYTECODE=1
# Keeps Python from buffering stdout and stderr to avoid situations where
# the application crashes without emitting any logs due to buffering.
ENV PYTHONUNBUFFERED=1
WORKDIR /app
# Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't
# available as packages in apt-cache, so use pip to download dependencies in a
# separate step and use Docker's caching.
COPY ./test/src/pytest.ini pytest.ini
COPY ./test/src/requirements.txt requirements.txt
RUN pip install -Ur requirements.txt
# Activate the Python virtual environment configured in the Dockerfile.
RUN . /opt/venv/bin/activate
### Install InfluxDB clients for testing
# Install InfluxDB keys to verify client installs.
# Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker).
# influxdata-archive_compat.key GPG fingerprint:
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
ADD https://repos.influxdata.com/influxdata-archive_compat.key ./influxdata-archive_compat.key
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
# Install InfluxDB clients to use in tests.
RUN apt-get update && apt-get -y install telegraf influxdb2-cli influxctl
COPY --chmod=755 ./test/config.toml /root/.config/influxctl/config.toml
### End InfluxDB client installs
ENTRYPOINT [ "pytest" ]
CMD [ "" ]

Dockerfile.tests (new file)

@@ -0,0 +1,18 @@
# syntax=docker/dockerfile:1.2
# The Dockerfile 1.2 syntax enables BuildKit features like cache mounts and
# inline mounts--temporary mounts that are only available during the build
# step, not at runtime. The syntax directive must be the first line of the file.
FROM python:3.9-slim
# Install the necessary packages for the test environment.
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
rsync
COPY --chmod=755 ./test/src/parse_yaml.sh /usr/local/bin/parse_yaml
COPY --chmod=755 ./test/src/prepare-content.sh /usr/local/bin/prepare-content
COPY ./data/products.yml /app/appdata/products.yml
WORKDIR /src
ENTRYPOINT [ "prepare-content" ]
# The default command is an empty string to pass all command line arguments to the entrypoint and allow the entrypoint to run.
CMD [ "" ]

compose.yaml

@@ -1,27 +1,39 @@
 # This is a Docker Compose file for the InfluxData documentation site.
 ## Run documentation tests for code samples.
+name: influxdata-docs
+volumes:
+  test-content:
 services:
-  test:
-    image: docs-v2-tests
-    container_name: docs-v2-tests
+  markdownlint:
+    image: davidanson/markdownlint-cli2:v0.13.0
+    container_name: markdownlint
     profiles:
-      - test
+      - ci
+      - lint
     volumes:
       - type: bind
-        source: ./test
-        target: /usr/src/app/test
-      - type: bind
-        source: ./data
-        target: /usr/src/app/test/data
-      - type: bind
-        source: ./static/downloads
-        target: /usr/src/app/test/tmp/data
+        source: .
+        target: /workdir
+    working_dir: /workdir
     build:
       context: .
-      dockerfile: test.Dockerfile
-      args:
-        - SOURCE_DIR=test
-        - DOCKER_IMAGE=docs-v2-tests
+  vale:
+    image: jdkato/vale:latest
+    container_name: vale
+    profiles:
+      - ci
+      - lint
+    volumes:
+      - type: bind
+        source: .
+        target: /workdir
+    working_dir: /workdir
+    entrypoint: ["/bin/vale"]
+    build:
+      context: .
+      dockerfile_inline: |
+        COPY .ci /src/.ci
+        COPY **/.vale.ini /src/
 ## Run InfluxData documentation with the hugo development server on port 1313.
 ## For more information about the hugomods/hugo image, see
 ## https://docker.hugomods.com/docs/development/docker-compose/

content/influxdb/cloud-dedicated/get-started/_index.md

@@ -10,16 +10,16 @@ weight: 3
 influxdb/cloud-dedicated/tags: [get-started]
 ---

-{{% product-name %}} is the platform purpose-built to collect, store, and
-query time series data.
-It is powered by the InfluxDB 3.0 storage engine which provides a number of
-benefits including nearly unlimited series cardinality, improved query performance,
-and interoperability with widely used data processing tools and platforms.
+InfluxDB is the platform purpose-built to collect, store, and query
+time series data.
+{{% product-name %}} is powered by the InfluxDB 3.0 storage engine, which
+provides nearly unlimited series cardinality,
+improved query performance, and interoperability with widely used data
+processing tools and platforms.

-**Time series data** is a sequence of data points indexed in time order.
-Data points typically consist of successive measurements made from the same
-source and are used to track changes over time.
-Examples of time series data include:
+**Time series data** is a sequence of data points indexed in time order. Data
+points typically consist of successive measurements made from the same source
+and are used to track changes over time. Examples of time series data include:

 - Industrial sensor data
 - Server performance metrics
@@ -28,14 +28,14 @@ Examples of time series data include:
 - Rainfall measurements
 - Stock prices

-This multi-part tutorial walks you through writing time series data to {{% product-name %}},
-querying, and then visualizing that data.
+This multi-part tutorial walks you through writing time series data to
+{{% product-name %}}, querying, and then visualizing that data.

 ## Key concepts before you get started

-Before you get started using InfluxDB, it's important to understand how time series
-data is organized and stored in InfluxDB and some key definitions that are used
-throughout this documentation.
+Before you get started using InfluxDB, it's important to understand how time
+series data is organized and stored in InfluxDB and some key definitions that
+are used throughout this documentation.

 - [Data organization](#data-organization)
 - [Schema on write](#schema-on-write)
@@ -44,43 +44,53 @@ throughout this documentation.

 ### Data organization

 The {{% product-name %}} data model organizes time series data into databases
-and measurements.
-A database can contain multiple measurements.
-Measurements contain multiple tags and fields.
+and tables.
+A database can contain multiple tables.
+Tables contain multiple tags and fields.

-- **Database**: Named location where time series data is stored.
-  A database can contain multiple _measurements_.
-- **Measurement**: Logical grouping for time series data.
-  All _points_ in a given measurement should have the same _tags_.
-  A measurement contains multiple _tags_ and _fields_.
-- **Tags**: Key-value pairs that provide metadata for each point--for example,
-  something to identify the source or context of the data like host,
-  location, station, etc.
-  Tag values may be null.
-- **Fields**: Key-value pairs with values that change over time--for example,
-  temperature, pressure, stock price, etc.
-  Field values may be null, but at least one field value is not null on any given row.
-- **Timestamp**: Timestamp associated with the data.
-  When stored on disk and queried, all data is ordered by time.
-  A timestamp is never null.
+- **Database**: A named location where time series data is stored in _tables_.
+  _Database_ is synonymous with _bucket_ in InfluxDB Cloud Serverless and InfluxDB TSM.
+- **Table**: A logical grouping for time series data. All _points_ in a given
+  table should have the same _tags_. A table contains _tags_ and
+  _fields_. _Table_ is synonymous with _measurement_ in InfluxDB Cloud
+  Serverless and InfluxDB TSM.
+- **Tags**: Key-value pairs that provide metadata for each point--for
+  example, something to identify the source or context of the data like
+  host, location, station, etc. Tag values may be null.
+- **Fields**: Key-value pairs with values that change over time--for
+  example, temperature, pressure, stock price, etc. Field values may be
+  null, but at least one field value is not null on any given row.
+- **Timestamp**: Timestamp associated with the data. When stored on disk and
+  queried, all data is ordered by time. A timestamp is never null.
+
+{{% note %}}
+
+#### What about buckets and measurements?
+
+If coming from InfluxDB Cloud Serverless or InfluxDB powered by the TSM storage
+engine, you're likely familiar with the concepts _bucket_ and _measurement_.
+_Bucket_ in TSM or InfluxDB Cloud Serverless is synonymous with _database_ in
+{{% product-name %}}. _Measurement_ in TSM or InfluxDB Cloud Serverless is
+synonymous with _table_ in {{% product-name %}}.
+{{% /note %}}

 ### Schema on write

-When using InfluxDB, you define your schema as you write your data.
-You don't need to create measurements (equivalent to a relational table) or
-explicitly define the schema of the measurement.
-Measurement schemas are defined by the schema of data as it is written to the measurement.
+As you write data to InfluxDB, the data defines the table schema. You don't need
+to create tables or explicitly define the table schema.

 ### Important definitions

 The following definitions are important to understand when using InfluxDB:

-- **Point**: Single data record identified by its _measurement, tag keys, tag values, field key, and timestamp_.
-- **Series**: A group of points with the same _measurement, tag keys and values, and field key_.
-- **Primary key**: Columns used to uniquely identify each row in a table.
-  Rows are uniquely identified by their _timestamp and tag set_.
-  A row's primary key _tag set_ does not include tags with null values.
+- **Point**: Single data record identified by its _measurement, tag keys, tag
+  values, field key, and timestamp_.
+- **Series**: A group of points with the same _measurement, tag keys and values,
+  and field key_.
+- **Primary key**: Columns used to uniquely identify each row in a table. Rows
+  are uniquely identified by their _timestamp and tag set_. A row's primary key
+  _tag set_ does not include tags with null values.

 ##### Example InfluxDB query results
@@ -88,8 +98,8 @@ The following definitions are important to understand when using InfluxDB:

 ## Tools to use

-The following table compares tools that you can use to interact with {{% product-name %}}.
-This tutorial covers many of the recommended tools.
+The following table compares tools that you can use to interact with
+{{% product-name %}}. This tutorial covers many of the recommended tools.

 | Tool                     | Administration | Write | Query |
 | :----------------------- | :------------: | :---: | :---: |
@@ -114,39 +124,52 @@ This tutorial covers many of the recommended tools.
 {{< /caption >}}

 {{% warn %}}
-Avoid using the `influx` CLI with {{% product-name %}}.
-While it may coincidentally work, it isn't supported.
+Avoid using the `influx` CLI with {{% product-name %}}. While it
+may coincidentally work, it isn't supported.
 {{% /warn %}}

 ### `influxctl` CLI

-The [`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
+The
+[`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
 writes, queries, and performs administrative tasks, such as managing databases
 and authorization tokens in a cluster.

 ### `influx3` data CLI

-The [`influx3` data CLI](/influxdb/cloud-dedicated/get-started/query/?t=influx3+CLI#execute-an-sql-query) is a community-maintained tool that lets you write and query data in {{% product-name %}} from a command line.
-It uses the HTTP API to write data and uses Flight gRPC to query data.
+The
+[`influx3` data CLI](/influxdb/cloud-dedicated/get-started/query/?t=influx3+CLI#execute-an-sql-query)
+is a community-maintained tool that lets you write and query data in
+{{% product-name %}} from a command line. It uses the HTTP API to write data and
+uses Flight gRPC to query data.

 ### InfluxDB HTTP API

-The [InfluxDB HTTP API](/influxdb/v2/reference/api/) provides a simple way to let you manage {{% product-name %}} and write and query data using HTTP(S) clients.
-Examples in this tutorial use cURL, but any HTTP(S) client will work.
+The [InfluxDB HTTP API](/influxdb/v2/reference/api/) provides a simple way to
+let you manage {{% product-name %}} and write and query data using HTTP(S)
+clients. Examples in this tutorial use cURL, but any HTTP(S) client will work.

-The `/write` and `/query` v1-compatible endpoints work with the username/password authentication schemes and existing InfluxDB 1.x tools and code.
-The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB 2.x tools and code.
+The `/write` and `/query` v1-compatible endpoints work with the
+username/password authentication schemes and existing InfluxDB 1.x tools and
+code. The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB
+2.x tools and code.

 ### InfluxDB client libraries

-InfluxDB client libraries are community-maintained, language-specific clients that interact with InfluxDB APIs.
+InfluxDB client libraries are community-maintained, language-specific clients
+that interact with InfluxDB APIs.

-[InfluxDB v3 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v3/) are the recommended client libraries for writing and querying data {{% product-name %}}.
-They use the HTTP API to write data and use Flight gRPC to query data.
+[InfluxDB v3 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v3/)
+are the recommended client libraries for writing and querying data
+{{% product-name %}}. They use the HTTP API to write data and use InfluxDB's
+Flight gRPC API to query data.

-[InfluxDB v2 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v2/) can use `/api/v2` HTTP endpoints to manage resources such as buckets and API tokens, and write data in {{% product-name %}}.
+[InfluxDB v2 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v2/)
+can use `/api/v2` HTTP endpoints to manage resources such as buckets and API
+tokens, and write data in {{% product-name %}}.

-[InfluxDB v1 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v1/) can write data to {{% product-name %}}.
+[InfluxDB v1 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v1/)
+can write data to {{% product-name %}}.

 ## Authorization
@@ -158,13 +181,14 @@ There are two types of tokens:

 - **Database token**: A token that grants read and write access to InfluxDB
   databases.
-- **Management token**: A short-lived (1 hour) [Auth0 token](#) used to
-  administer your InfluxDB cluster.
-  These are generated by the `influxctl` CLI and do not require any direct management.
-  Management tokens authorize a user to perform tasks related to:
+- **Management token**: A short-lived (1 hour) [Auth0 token](#) used to
+  administer your InfluxDB cluster. These are generated by the `influxctl` CLI
+  and do not require any direct management. Management tokens authorize a user
+  to perform tasks related to:
   - Account management
   - Database management
   - Database token management
   - Pricing
   <!-- - Infrastructure management -->

-{{< page-nav next="/influxdb/cloud-dedicated/get-started/setup/" >}}
+{{< page-nav next="/influxdb/clustered/get-started/setup/" >}}
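
The database/table/tag/field/timestamp model described above maps one-to-one onto line protocol. For example, the sample row used later in this PR's client-library pages:

```sh
home,room=Kitchen co=9i,hum=20.2,temp=72 1641024000
```

Here `home` is the table (measurement), `room` is a tag, `co`, `hum`, and `temp` are fields, and the trailing value is a second-precision timestamp.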

Diff suppressed (file too large).

cloud-dedicated: Use InfluxDB client libraries to write line protocol data

@@ -1,7 +1,8 @@
 ---
 title: Use InfluxDB client libraries to write line protocol data
 description: >
-  Use InfluxDB API clients to write line protocol data to InfluxDB Cloud Dedicated.
+  Use InfluxDB API clients to write points as line protocol data to InfluxDB
+  Cloud Dedicated.
 menu:
   influxdb_cloud_dedicated:
     name: Use client libraries
@@ -13,24 +14,36 @@ related:
 - /influxdb/cloud-dedicated/get-started/write/
 ---

-Use InfluxDB client libraries to build line protocol, and then write it to an
-InfluxDB database.
+Use InfluxDB client libraries to build time series points, and then write them
+as line protocol to an {{% product-name %}} database.

 - [Construct line protocol](#construct-line-protocol)
+  - [Example home schema](#example-home-schema)
 - [Set up your project](#set-up-your-project)
 - [Construct points and write line protocol](#construct-points-and-write-line-protocol)
-- [Run the example](#run-the-example)
-- [Home sensor data line protocol](#home-sensor-data-line-protocol)

 ## Construct line protocol

-With a [basic understanding of line protocol](/influxdb/cloud-dedicated/write-data/line-protocol/),
-you can now construct line protocol and write data to InfluxDB.
-Consider a use case where you collect data from sensors in your home.
-Each sensor collects temperature, humidity, and carbon monoxide readings.
+With a
+[basic understanding of line protocol](/influxdb/cloud-dedicated/write-data/line-protocol/),
+you can construct line protocol data and write it to InfluxDB.
+
+All InfluxDB client libraries write data in line protocol format to InfluxDB.
+Client library `write` methods let you provide data as raw line protocol or as
+`Point` objects that the client library converts to line protocol. If your
+program creates the data you write to InfluxDB, use the client library `Point`
+interface to take advantage of type safety in your program.
+
+### Example home schema
+
+Consider a use case where you collect data from sensors in your home. Each
+sensor collects temperature, humidity, and carbon monoxide readings.

 To collect this data, use the following schema:

+<!-- vale InfluxDataDocs.v3Schema = NO -->
+
 - **measurement**: `home`
 - **tags**
   - `room`: Living Room or Kitchen
 - **fields**
@@ -39,337 +52,427 @@ To collect this data, use the following schema:
   - `co`: carbon monoxide in parts per million (integer)
 - **timestamp**: Unix timestamp in _second_ precision

-The following example shows how to construct and write points that follow this schema.
+<!-- vale InfluxDataDocs.v3Schema = YES -->
+
+The following example shows how to construct and write points that follow the
+`home` schema.

 ## Set up your project

-The examples in this guide assume you followed [Set up InfluxDB](/influxdb/cloud-dedicated/get-started/setup/)
-and [Write data set up](/influxdb/cloud-dedicated/get-started/write/#set-up-your-project-and-credentials)
+The examples in this guide assume you followed
+[Set up InfluxDB](/influxdb/cloud-dedicated/get-started/setup/) and
+[Write data set up](/influxdb/cloud-dedicated/get-started/write/#set-up-your-project-and-credentials)
 instructions in [Get started](/influxdb/cloud-dedicated/get-started/).

 After setting up InfluxDB and your project, you should have the following:

 - {{< product-name >}} credentials:
   - [Database](/influxdb/cloud-dedicated/admin/databases/)
   - [Database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens)
   - Cluster hostname
 - A directory for your project.
-- Credentials stored as environment variables or in a project configuration file--for example, a `.env` ("dotenv") file.
+- Credentials stored as environment variables or in a project configuration
+  file--for example, a `.env` ("dotenv") file.
 - Client libraries installed for writing data to InfluxDB.

-The following example shows how to construct `Point` objects that follow the [example `home` schema](#example-home-schema), and then write the points as line protocol to an
-{{% product-name %}} database.
+The following example shows how to construct `Point` objects that follow the
+[example `home` schema](#example-home-schema), and then write the data as line
+protocol to an {{% product-name %}} database.
+
+The examples use InfluxDB v3 client libraries. For examples using InfluxDB v2
+client libraries to write data to InfluxDB v3, see
+[InfluxDB v2 clients](/influxdb/cloud-dedicated/reference/client-libraries/v2/).

 {{< tabs-wrapper >}}
 {{% tabs %}}
+<!-- prettier-ignore -->
 [Go](#)
 [Node.js](#)
 [Python](#)
 {{% /tabs %}}
 {{% tab-content %}}
+
+The following steps set up a Go project using the
+[InfluxDB v3 Go client](https://github.com/InfluxCommunity/influxdb3-go/):
+
 <!-- BEGIN GO PROJECT SETUP -->

 1. Install [Go 1.13 or later](https://golang.org/doc/install).

-2. Inside of your project directory, install the client package to your project dependencies.
-
-   ```sh
-   go get github.com/influxdata/influxdb-client-go/v2
-   ```
+1. Create a directory for your Go module and change to the directory--for
+   example:
+
+   ```sh
+   mkdir iot-starter-go && cd $_
+   ```
+
+1. Initialize a Go module--for example:
+
+   ```sh
+   go mod init iot-starter
+   ```
+
+1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/),
+   which provides the InfluxDB `influxdb3` Go client library module.
+
+   ```sh
+   go get github.com/InfluxCommunity/influxdb3-go
+   ```

 <!-- END GO SETUP PROJECT -->

-{{% /tab-content %}}
-{{% tab-content %}}
+{{% /tab-content %}} {{% tab-content %}}
+
 <!-- BEGIN NODE.JS PROJECT SETUP -->

-Inside of your project directory, install the `@influxdata/influxdb-client` InfluxDB v2 JavaScript client library.
-
-```sh
-npm install --save @influxdata/influxdb-client
-```
+The following steps set up a JavaScript project using the
+[InfluxDB v3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/).
+
+1. Install [Node.js](https://nodejs.org/en/download/).
+
+1. Create a directory for your JavaScript project and change to the
+   directory--for example:
+
+   ```sh
+   mkdir -p iot-starter-js && cd $_
+   ```
+
+1. Initialize a project--for example, using `npm`:
+
+   <!-- pytest.mark.skip -->
+
+   ```sh
+   npm init
+   ```
+
+1. Install the `@influxdata/influxdb3-client` InfluxDB v3 JavaScript client
+   library.
+
+   ```sh
+   npm install @influxdata/influxdb3-client
+   ```

 <!-- END NODE.JS SETUP PROJECT -->

-{{% /tab-content %}}
-{{% tab-content %}}
+{{% /tab-content %}} {{% tab-content %}}
+
 <!-- BEGIN PYTHON SETUP PROJECT -->

-1. **Optional, but recommended**: Use [`venv`](https://docs.python.org/3/library/venv.html)) or [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual environment for installing and executing code--for example:
-
-   Inside of your project directory, enter the following command using `venv` to create and activate a virtual environment for the project:
-
-   ```sh
-   python3 -m venv envs/env1 && source ./envs/env1/bin/activate
-   ```
-
-2. Install the [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), which provides the InfluxDB `influxdb_client_3` Python client library module and also installs the [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for working with Arrow data.
-
-   ```sh
-   pip install influxdb3-python
-   ```
+The following steps set up a Python project using the
+[InfluxDB v3 Python client](https://github.com/InfluxCommunity/influxdb3-python/):
+
+1. Install [Python](https://www.python.org/downloads/)
+
+1. Inside of your project directory, create a directory for your Python module
+   and change to the module directory--for example:
+
+   ```sh
+   mkdir -p iot-starter-py && cd $_
+   ```
+
+1. **Optional, but recommended**: Use
+   [`venv`](https://docs.python.org/3/library/venv.html) or
+   [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual
+   environment for installing and executing code--for example, enter the
+   following command using `venv` to create and activate a virtual environment
+   for the project:
+
+   ```bash
+   python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate
+   ```
+
+1. Install
+   [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python),
+   which provides the InfluxDB `influxdb_client_3` Python client library module
+   and also installs the
+   [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for
+   working with Arrow data.
+
+   ```sh
+   pip install influxdb3-python
+   ```

 <!-- END PYTHON SETUP PROJECT -->

 {{% /tab-content %}}
 {{< /tabs-wrapper >}}
 ## Construct points and write line protocol

+Client libraries provide one or more `Point` constructor methods. Some libraries
+support language-native data structures, such as Go's `struct`, for creating
+points.
+
 {{< tabs-wrapper >}}
 {{% tabs %}}
+<!-- prettier-ignore -->
 [Go](#)
 [Node.js](#)
 [Python](#)
 {{% /tabs %}}
 {{% tab-content %}}
 <!-- BEGIN GO SETUP SAMPLE -->

-1. Create a file for your module--for example: `write-point.go`.
-
-2. In `write-point.go`, enter the following sample code:
-
-   ```go
-   package main
-
-   import (
-     "os"
-     "time"
-     "fmt"
-     "github.com/influxdata/influxdb-client-go/v2"
-   )
-
-   func main() {
-     // Set a log level constant
-     const debugLevel uint = 4
-
-     /**
-       * Define options for the client.
-       * Instantiate the client with the following arguments:
-       *   - An object containing InfluxDB URL and token credentials.
-       *   - Write options for batch size and timestamp precision.
-     **/
-     clientOptions := influxdb2.DefaultOptions().
-       SetBatchSize(20).
-       SetLogLevel(debugLevel).
-       SetPrecision(time.Second)
-
-     client := influxdb2.NewClientWithOptions(os.Getenv("INFLUX_URL"),
-       os.Getenv("INFLUX_TOKEN"),
-       clientOptions)
-
-     /**
-       * Create an asynchronous, non-blocking write client.
-       * Provide your InfluxDB org and database as arguments
-     **/
-     writeAPI := client.WriteAPI(os.Getenv("INFLUX_ORG"), "get-started")
-
-     // Get the errors channel for the asynchronous write client.
-     errorsCh := writeAPI.Errors()
-
-     /** Create a point.
-       * Provide measurement, tags, and fields as arguments.
-     **/
-     p := influxdb2.NewPointWithMeasurement("home").
-       AddTag("room", "Kitchen").
-       AddField("temp", 72.0).
-       AddField("hum", 20.2).
-       AddField("co", 9).
-       SetTime(time.Now())
-
-     // Define a proc for handling errors.
-     go func() {
-       for err := range errorsCh {
-         fmt.Printf("write error: %s\n", err.Error())
-       }
-     }()
-
-     // Write the point asynchronously
-     writeAPI.WritePoint(p)
-
-     // Send pending writes from the buffer to the database.
-     writeAPI.Flush()
-
-     // Ensure background processes finish and release resources.
-     client.Close()
-   }
-   ```
+1. Create a file for your module--for example: `main.go`.
+
+1. In `main.go`, enter the following sample code:
+
+   ```go
+   package main
+
+   import (
+     "context"
+     "os"
+     "fmt"
+     "time"
+
+     "github.com/InfluxCommunity/influxdb3-go/influxdb3"
+     "github.com/influxdata/line-protocol/v2/lineprotocol"
+   )
+
+   func Write() error {
+     url := os.Getenv("INFLUX_HOST")
+     token := os.Getenv("INFLUX_TOKEN")
+     database := os.Getenv("INFLUX_DATABASE")
+
+     // To instantiate a client, call New() with InfluxDB credentials.
+     client, err := influxdb3.New(influxdb3.ClientConfig{
+       Host:     url,
+       Token:    token,
+       Database: database,
+     })
+
+     /** Use a deferred function to ensure the client is closed when the
+       * function returns.
+     **/
+     defer func(client *influxdb3.Client) {
+       err = client.Close()
+       if err != nil {
+         panic(err)
+       }
+     }(client)
+
+     /** Use the NewPoint method to construct a point.
+       * NewPoint(measurement, tags map, fields map, time)
+     **/
+     point := influxdb3.NewPoint("home",
+       map[string]string{
+         "room": "Living Room",
+       },
+       map[string]any{
+         "temp": 24.5,
+         "hum":  40.5,
+         "co":   15i},
+       time.Now(),
+     )
+
+     /** Use the NewPointWithMeasurement method to construct a point with
+       * method chaining.
+     **/
+     point2 := influxdb3.NewPointWithMeasurement("home").
+       SetTag("room", "Living Room").
+       SetField("temp", 23.5).
+       SetField("hum", 38.0).
+       SetField("co", 16i).
+       SetTimestamp(time.Now())
+
+     fmt.Println("Writing points")
+     points := []*influxdb3.Point{point, point2}
+
+     /** Write points to InfluxDB.
+       * You can specify WriteOptions, such as Gzip threshold,
+       * default tags, and timestamp precision. Default precision is lineprotocol.Nanosecond
+     **/
+     err = client.WritePoints(context.Background(), points,
+       influxdb3.WithPrecision(lineprotocol.Second))
+     return nil
+   }
+
+   func main() {
+     Write()
+   }
+   ```
+
+1. To run the module and write the data to your {{% product-name %}} database,
+   enter the following command in your terminal:
+
+   <!-- pytest.mark.skip -->
+
+   ```sh
+   go run main.go
+   ```

-<!-- END GO SETUP SAMPLE -->
-{{% /tab-content %}}
-{{% tab-content %}}
+<!-- END GO SAMPLE -->
+
+{{% /tab-content %}} {{% tab-content %}}
 <!-- BEGIN NODE.JS SETUP SAMPLE -->

-1. Create a file for your module--for example: `write-point.js`.
-
-2. In `write-point.js`, enter the following sample code:
-
-   ```js
-   'use strict'
-   /** @module write
-   * Use the JavaScript client library for Node.js. to create a point and write it to InfluxDB
-   **/
-   import {InfluxDB, Point} from '@influxdata/influxdb-client'
-
-   /** Get credentials from the environment **/
-   const url = process.env.INFLUX_URL
-   const token = process.env.INFLUX_TOKEN
-   const org = process.env.INFLUX_ORG
-
-   /**
-   * Instantiate a client with a configuration object
-   * that contains your InfluxDB URL and token.
-   **/
-   const influxDB = new InfluxDB({url, token})
-
-   /**
-   * Create a write client configured to write to the database.
-   * Provide your InfluxDB org and database.
-   **/
-   const writeApi = influxDB.getWriteApi(org, 'get-started')
-
-   /**
-   * Create a point and add tags and fields.
-   * To add a field, call the field method for your data type.
-   **/
-   const point1 = new Point('home')
-     .tag('room', 'Kitchen')
-     .floatField('temp', 72.0)
-     .floatField('hum', 20.2)
-     .intField('co', 9)
-   console.log(` ${point1}`)
-
-   /**
-   * Add the point to the batch.
-   **/
-   writeApi.writePoint(point1)
-
-   /**
-   * Flush pending writes in the batch from the buffer and close the write client.
-   **/
-   writeApi.close().then(() => {
-     console.log('WRITE FINISHED')
-   })
-   ```
+1. Create a file for your module--for example: `write-points.js`.
+
+1. In `write-points.js`, enter the following sample code:
+
+   ```js
+   // write-points.js
+   import { InfluxDBClient, Point } from '@influxdata/influxdb3-client';
+
+   /**
+    * Set InfluxDB credentials.
+    */
+   const host = process.env.INFLUX_HOST ?? '';
+   const database = process.env.INFLUX_DATABASE;
+   const token = process.env.INFLUX_TOKEN;
+
+   /**
+    * Write line protocol to InfluxDB using the JavaScript client library.
+    */
+   export async function writePoints() {
+     /**
+      * Instantiate an InfluxDBClient.
+      * Provide the host URL and the database token.
+      */
+     const client = new InfluxDBClient({ host, token });
+
+     /** Use the fluent interface with chained methods to construct Points. */
+     const point = Point.measurement('home')
+       .setTag('room', 'Living Room')
+       .setFloatField('temp', 22.2)
+       .setFloatField('hum', 35.5)
+       .setIntegerField('co', 7)
+       .setTimestamp(new Date().getTime() / 1000);
+
+     const point2 = Point.measurement('home')
+       .setTag('room', 'Kitchen')
+       .setFloatField('temp', 21.0)
+       .setFloatField('hum', 35.9)
+       .setIntegerField('co', 0)
+       .setTimestamp(new Date().getTime() / 1000);
+
+     /** Write points to InfluxDB.
+      * The write method accepts an array of points, the target database, and
+      * an optional configuration object.
+      * You can specify WriteOptions, such as Gzip threshold, default tags,
+      * and timestamp precision. Default precision is lineprotocol.Nanosecond
+      **/
+     try {
+       await client.write([point, point2], database, '', { precision: 's' });
+       console.log('Data has been written successfully!');
+     } catch (error) {
+       console.error(`Error writing data to InfluxDB: ${error.body}`);
+     }
+
+     client.close();
+   }
+
+   writePoints();
+   ```
+
+1. To run the module and write the data to your {{< product-name >}} database,
+   enter the following command in your terminal:
+
+   <!-- pytest.mark.skip -->
+
+   ```sh
+   node writePoints.js
+   ```

-<!-- END NODE.JS SETUP SAMPLE -->
-{{% /tab-content %}}
-{{% tab-content %}}
+<!-- END NODE.JS SAMPLE -->
+
+{{% /tab-content %}} {{% tab-content %}}
+
 <!-- BEGIN PYTHON SETUP SAMPLE -->

-1. Create a file for your module--for example: `write-point.py`.
-
-2. In `write-point.py`, enter the following sample code to write data in batching mode:
-
-   ```python
-   import os
-   from influxdb_client_3 import Point, write_client_options, WritePrecision, WriteOptions, InfluxDBError
-
-   # Create an array of points with tags and fields.
-   points = [Point("home")
-             .tag("room", "Kitchen")
-             .field("temp", 25.3)
-             .field('hum', 20.2)
-             .field('co', 9)]
-
-   # With batching mode, define callbacks to execute after a successful or failed write request.
-   # Callback methods receive the configuration and data sent in the request.
-   def success(self, data: str):
-       print(f"Successfully wrote batch: data: {data}")
-
-   def error(self, data: str, exception: InfluxDBError):
-       print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
-
-   def retry(self, data: str, exception: InfluxDBError):
-       print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
-
-   # Configure options for batch writing.
-   write_options = WriteOptions(batch_size=500,
-                                flush_interval=10_000,
-                                jitter_interval=2_000,
-                                retry_interval=5_000,
-                                max_retries=5,
-                                max_retry_delay=30_000,
-                                exponential_base=2)
-
-   # Create an options dict that sets callbacks and WriteOptions.
-   wco = write_client_options(success_callback=success,
-                              error_callback=error,
-                              retry_callback=retry,
-                              WriteOptions=write_options)
-
-   # Instantiate a synchronous instance of the client with your
-   # InfluxDB credentials and write options.
-   with InfluxDBClient3(host=config['INFLUX_HOST'],
-                        token=config['INFLUX_TOKEN'],
-                        database=config['INFLUX_DATABASE'],
-                        write_client_options=wco) as client:
-       client.write(points, write_precision='s')
-   ```
+1. Create a file for your module--for example: `write-points.py`.
+
+1. In `write-points.py`, enter the following sample code to write data in
+   batching mode:
+
+   ```python
+   import os
+   from influxdb_client_3 import (
+     InfluxDBClient3, InfluxDBError, Point, WritePrecision,
+     WriteOptions, write_client_options)
+
+   host = os.getenv('INFLUX_HOST')
+   token = os.getenv('INFLUX_TOKEN')
+   database = os.getenv('INFLUX_DATABASE')
+
+   # Create an array of points with tags and fields.
+   points = [Point("home")
+             .tag("room", "Kitchen")
+             .field("temp", 25.3)
+             .field('hum', 20.2)
+             .field('co', 9)]
+
+   # With batching mode, define callbacks to execute after a successful or
+   # failed write request.
+   # Callback methods receive the configuration and data sent in the request.
+   def success(self, data: str):
+       print(f"Successfully wrote batch: data: {data}")
+
+   def error(self, data: str, exception: InfluxDBError):
+       print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
+
+   def retry(self, data: str, exception: InfluxDBError):
+       print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
+
+   # Configure options for batch writing.
+   write_options = WriteOptions(batch_size=500,
+                                flush_interval=10_000,
+                                jitter_interval=2_000,
+                                retry_interval=5_000,
+                                max_retries=5,
+                                max_retry_delay=30_000,
+                                exponential_base=2)
+
+   # Create an options dict that sets callbacks and WriteOptions.
+   wco = write_client_options(success_callback=success,
+                              error_callback=error,
+                              retry_callback=retry,
+                              write_options=write_options)
+
+   # Instantiate a synchronous instance of the client with your
+   # InfluxDB credentials and write options, such as Gzip threshold, default tags,
+   # and timestamp precision. Default precision is nanosecond ('ns').
+   with InfluxDBClient3(host=host,
+                        token=token,
+                        database=database,
+                        write_client_options=wco) as client:
+       client.write(points, write_precision='s')
+   ```
+
+1. To run the module and write the data to your {{< product-name >}} database,
+   enter the following command in your terminal:
+
+   <!-- pytest.mark.skip -->
+
+   ```sh
+   python write-points.py
+   ```

 <!-- END PYTHON SETUP PROJECT -->

-{{% /tab-content %}}
-{{< /tabs-wrapper >}}
+{{% /tab-content %}} {{< /tabs-wrapper >}}
 The sample code does the following:

+<!-- vale InfluxDataDocs.v3Schema = NO -->
+
 1. Instantiates a client configured with the InfluxDB URL and API token.
-2. Uses the client to instantiate a **write client** with credentials.
-
-3. Constructs a `Point` object with the [measurement](/influxdb/cloud-dedicated/reference/glossary/#measurement) name (`"home"`).
-
-4. Adds a tag and fields to the point.
-
-5. Adds the point to a batch to be written to the database.
-
-6. Sends the batch to InfluxDB and waits for the response.
-
-7. Executes callbacks for the response, flushes the write buffer, and releases resources.
-
-## Run the example
-
-To run the sample and write the data to your InfluxDB Cloud Dedicated database, enter the following command in your terminal:
-
-{{< code-tabs-wrapper >}}
-{{% code-tabs %}}
-[Go](#)
-[Node.js](#)
-[Python](#)
-{{% /code-tabs %}}
-{{% code-tab-content %}}
-
-<!-- BEGIN GO RUN EXAMPLE -->
-
-```sh
-go run write-point.go
-```
-
-<!-- END GO RUN EXAMPLE -->
-
-{{% /code-tab-content %}}
-{{% code-tab-content %}}
-
-<!-- BEGIN NODE.JS RUN EXAMPLE -->
-
-```sh
-node write-point.js
-```
-
-<!-- END NODE.JS RUN EXAMPLE -->
-
-{{% /code-tab-content %}}
-{{% code-tab-content %}}
-
-<!-- BEGIN PYTHON RUN EXAMPLE -->
-
-```sh
-python write-point.py
-```
-
-<!-- END PYTHON RUN EXAMPLE -->
-
-{{% /code-tab-content %}}
-{{< /code-tabs-wrapper >}}
-
-The example logs the point as line protocol to stdout, and then writes the point to the database.
-The line protocol is similar to the following:
-
-### Home sensor data line protocol
-
-```sh
-home,room=Kitchen co=9i,hum=20.2,temp=72 1641024000
-```
+1. Constructs `home`
+   [measurement](/influxdb/cloud-dedicated/reference/glossary/#measurement)
+   `Point` objects.
+1. Sends data as line protocol format to InfluxDB and waits for the response.
+1. If the write succeeds, logs the success message to stdout; otherwise, logs
+   the failure message and error details.
+1. Closes the client to release resources.
+
+<!-- vale InfluxDataDocs.v3Schema = YES -->
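
For comparison with the old example's output, the points constructed by the new v3 samples serialize to line protocol roughly as follows--a sketch: field ordering depends on the client, and the timestamp shown is only a placeholder:

```sh
home,room=Living\ Room co=7i,hum=35.5,temp=22.2 1719000000
home,room=Kitchen co=0i,hum=35.9,temp=21 1719000000
```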

content/influxdb/clustered/get-started/_index.md

@@ -10,17 +10,16 @@ weight: 3
 influxdb/clustered/tags: [get-started]
 ---

-{{% product-name %}} is a highly available InfluxDB cluster hosted and
-managed on your own infrastructure and is the platform purpose-built to collect,
-store, and query time series data.
-It is powered by the InfluxDB 3.0 storage engine which provides a number of
-benefits including nearly unlimited series cardinality, improved query performance,
-and interoperability with widely used data processing tools and platforms.
+InfluxDB is the platform purpose-built to collect, store, and query
+time series data.
+{{% product-name %}} is powered by the InfluxDB 3.0 storage engine, which
+provides nearly unlimited series cardinality,
+improved query performance, and interoperability with widely used data
+processing tools and platforms.

-**Time series data** is a sequence of data points indexed in time order.
-Data points typically consist of successive measurements made from the same
-source and are used to track changes over time.
-Examples of time series data include:
+**Time series data** is a sequence of data points indexed in time order. Data
+points typically consist of successive measurements made from the same source
+and are used to track changes over time. Examples of time series data include:

 - Industrial sensor data
 - Server performance metrics
@@ -45,33 +44,43 @@ throughout this documentation.

 ### Data organization

 The {{% product-name %}} data model organizes time series data into databases
-and measurements.
-A database can contain multiple measurements.
-Measurements contain multiple tags and fields.
+and tables.
+A database can contain multiple tables.
+Tables contain multiple tags and fields.

-- **Database**: Named location where time series data is stored.
-  A database can contain multiple _measurements_.
-- **Measurement**: Logical grouping for time series data.
-  All _points_ in a given measurement should have the same _tags_.
-  A measurement contains multiple _tags_ and _fields_.
-- **Tags**: Key-value pairs that provide metadata for each point--for example,
-  something to identify the source or context of the data like host,
-  location, station, etc.
-  Tag values may be null.
-- **Fields**: Key-value pairs with values that change over time--for example,
-  temperature, pressure, stock price, etc.
-  Field values may be null, but at least one field value is not null on any given row.
-- **Timestamp**: Timestamp associated with the data.
-  When stored on disk and queried, all data is ordered by time.
-  A timestamp is never null.
+- **Database**: A named location where time series data is stored in _tables_.
+  _Database_ is synonymous with _bucket_ in InfluxDB Cloud Serverless and InfluxDB TSM.
+- **Table**: A logical grouping for time series data. All _points_ in a given
+  table should have the same _tags_. A table contains _tags_ and
+  _fields_. _Table_ is synonymous with _measurement_ in InfluxDB Cloud
+  Serverless and InfluxDB TSM.
+- **Tags**: Key-value pairs that provide metadata for each point--for
+  example, something to identify the source or context of the data like
+  host, location, station, etc. Tag values may be null.
+- **Fields**: Key-value pairs with values that change over time--for
+  example, temperature, pressure, stock price, etc. Field values may be
+  null, but at least one field value is not null on any given row.
+- **Timestamp**: Timestamp associated with the data. When stored on disk and
+  queried, all data is ordered by time. A timestamp is never null.
+
+{{% note %}}
+
+#### What about buckets and measurements?
+
+If coming from InfluxDB Cloud Serverless or InfluxDB powered by the TSM storage engine, you're likely familiar
+with the concepts _bucket_ and _measurement_.
+_Bucket_ in TSM or InfluxDB Cloud Serverless is synonymous with
+_database_ in {{% product-name %}}.
+_Measurement_ in TSM or InfluxDB Cloud Serverless is synonymous with
+_table_ in {{% product-name %}}.
+{{% /note %}}

 ### Schema on write

-When using InfluxDB, you define your schema as you write your data.
-You don't need to create measurements (equivalent to a relational table) or
-explicitly define the schema of the measurement.
-Measurement schemas are defined by the schema of data as it is written to the measurement.
+As you write data to InfluxDB, the data defines the table schema.
+You don't need to create tables or
+explicitly define the table schema.

 ### Important definitions
@@ -121,7 +130,7 @@ While it may coincidentally work, it isn't supported.

 ### `influxctl` admin CLI

-The [`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
+The [`influxctl` command line interface (CLI)](/influxdb/clustered/reference/cli/influxctl/)
 writes, queries, and performs administrative tasks, such as managing databases
 and authorization tokens in a cluster.
@@ -143,7 +152,7 @@ The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB 2.x tool

 InfluxDB client libraries are community-maintained, language-specific clients that interact with InfluxDB APIs.

 [InfluxDB v3 client libraries](/influxdb/clustered/reference/client-libraries/v3/) are the recommended client libraries for writing and querying data {{% product-name %}}.
-They use the HTTP API to write data and use Flight gRPC to query data.
+They use the HTTP API to write data and use InfluxDB's Flight gRPC API to query data.

 [InfluxDB v2 client libraries](/influxdb/clustered/reference/client-libraries/v2/) can use `/api/v2` HTTP endpoints to manage resources such as buckets and API tokens, and write data in {{% product-name %}}.
@@ -162,7 +171,7 @@ There are two types of tokens:

   administer your InfluxDB cluster.
   These are generated by the `influxctl` CLI and do not require any direct management.
   Management tokens authorize a user to perform tasks related to:
   - Account management
   - Database management
   - Database token management
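
As a concrete illustration of the administrative tasks `influxctl` handles (a sketch; subcommands and flags as documented for the `influxctl` CLI, with placeholder names):

```sh
# Create a database, then a token with read/write access to it.
influxctl database create DATABASE_NAME
influxctl token create \
  --read-database DATABASE_NAME \
  --write-database DATABASE_NAME \
  "Example token description"
```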

Diff suppressed (file too large).

clustered: Use InfluxDB client libraries to write line protocol data (previous version, removed)

@@ -1,375 +0,0 @@
---
title: Use InfluxDB client libraries to write line protocol data
description: >
Use InfluxDB API clients to write line protocol data to InfluxDB Clustered.
menu:
influxdb_clustered:
name: Use client libraries
parent: Write line protocol
identifier: write-client-libs
weight: 103
related:
- /influxdb/clustered/reference/syntax/line-protocol/
- /influxdb/clustered/get-started/write/
---
Use InfluxDB client libraries to build line protocol, and then write it to an
InfluxDB database.
- [Construct line protocol](#construct-line-protocol)
- [Set up your project](#set-up-your-project)
- [Construct points and write line protocol](#construct-points-and-write-line-protocol)
- [Run the example](#run-the-example)
- [Home sensor data line protocol](#home-sensor-data-line-protocol)
## Construct line protocol
With a [basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
you can now construct line protocol and write data to InfluxDB.
Consider a use case where you collect data from sensors in your home.
Each sensor collects temperature, humidity, and carbon monoxide readings.
To collect this data, use the following schema:
- **measurement**: `home`
- **tags**
- `room`: Living Room or Kitchen
- **fields**
- `temp`: temperature in °C (float)
- `hum`: percent humidity (float)
- `co`: carbon monoxide in parts per million (integer)
- **timestamp**: Unix timestamp in _second_ precision
The following example shows how to construct and write points that follow this schema.
## Set up your project
The examples in this guide assume you followed [Set up InfluxDB](/influxdb/clustered/get-started/setup/)
and [Write data set up](/influxdb/clustered/get-started/write/#set-up-your-project-and-credentials)
instructions in [Get started](/influxdb/clustered/get-started/).
After setting up InfluxDB and your project, you should have the following:
- {{< product-name >}} credentials:
- [Database](/influxdb/clustered/admin/databases/)
- [Database token](/influxdb/clustered/admin/tokens/#database-tokens)
- Cluster hostname
- A directory for your project.
- Credentials stored as environment variables or in a project configuration file--for example, a `.env` ("dotenv") file.
- Client libraries installed for writing data to InfluxDB.
The following example shows how to construct `Point` objects that follow the [example `home` schema](#example-home-schema), and then write the points as line protocol to an
{{% product-name %}} database.
{{< tabs-wrapper >}}
{{% tabs %}}
[Go](#)
[Node.js](#)
[Python](#)
{{% /tabs %}}
{{% tab-content %}}
<!-- BEGIN GO PROJECT SETUP -->
1. Install [Go 1.13 or later](https://golang.org/doc/install).
2. Inside of your project directory, install the client package to your project dependencies.
```sh
go get github.com/influxdata/influxdb-client-go/v2
```
<!-- END GO SETUP PROJECT -->
{{% /tab-content %}}
{{% tab-content %}}
<!-- BEGIN NODE.JS PROJECT SETUP -->
Inside of your project directory, install the `@influxdata/influxdb-client` InfluxDB v2 JavaScript client library.
```sh
npm install --save @influxdata/influxdb-client
```
<!-- END NODE.JS SETUP PROJECT -->
{{% /tab-content %}}
{{% tab-content %}}
<!-- BEGIN PYTHON SETUP PROJECT -->
1. **Optional, but recommended**: Use [`venv`](https://docs.python.org/3/library/venv.html)) or [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual environment for installing and executing code--for example:
Inside of your project directory, enter the following command using `venv` to create and activate a virtual environment for the project:
```sh
python3 -m venv envs/env1 && source ./envs/env1/bin/activate
```
2. Install the [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), which provides the InfluxDB `influxdb_client_3` Python client library module and also installs the [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for working with Arrow data.
```sh
pip install influxdb3-python
```
<!-- END PYTHON SETUP PROJECT -->
{{% /tab-content %}}
{{< /tabs-wrapper >}}
## Construct points and write line protocol
{{< tabs-wrapper >}}
{{% tabs %}}
[Go](#)
[Node.js](#)
[Python](#)
{{% /tabs %}}
{{% tab-content %}}
<!-- BEGIN GO SETUP SAMPLE -->
1. Create a file for your module--for example: `write-point.go`.
2. In `write-point.go`, enter the following sample code:
```go
package main
import (
"os"
"time"
"fmt"
"github.com/influxdata/influxdb-client-go/v2"
)
func main() {
// Set a log level constant
const debugLevel uint = 4
/**
* Define options for the client.
* Instantiate the client with the following arguments:
* - An object containing InfluxDB URL and token credentials.
* - Write options for batch size and timestamp precision.
**/
clientOptions := influxdb2.DefaultOptions().
SetBatchSize(20).
SetLogLevel(debugLevel).
SetPrecision(time.Second)
client := influxdb2.NewClientWithOptions(os.Getenv("INFLUX_URL"),
os.Getenv("INFLUX_TOKEN"),
clientOptions)
/**
* Create an asynchronous, non-blocking write client.
* Provide your InfluxDB org and database as arguments
**/
writeAPI := client.WriteAPI(os.Getenv("INFLUX_ORG"), "get-started")
// Get the errors channel for the asynchronous write client.
errorsCh := writeAPI.Errors()
/** Create a point.
* Provide measurement, tags, and fields as arguments.
**/
p := influxdb2.NewPointWithMeasurement("home").
AddTag("room", "Kitchen").
AddField("temp", 72.0).
AddField("hum", 20.2).
AddField("co", 9).
SetTime(time.Now())
// Define a proc for handling errors.
go func() {
for err := range errorsCh {
fmt.Printf("write error: %s\n", err.Error())
}
}()
// Write the point asynchronously
writeAPI.WritePoint(p)
// Send pending writes from the buffer to the database.
writeAPI.Flush()
// Ensure background processes finish and release resources.
client.Close()
}
```
<!-- END GO SETUP SAMPLE -->
{{% /tab-content %}}
{{% tab-content %}}
<!-- BEGIN NODE.JS SETUP SAMPLE -->
1. Create a file for your module--for example: `write-point.js`.
2. In `write-point.js`, enter the following sample code:
```js
'use strict'
/** @module write
* Use the JavaScript client library for Node.js to create a point and write it to InfluxDB
**/
import {InfluxDB, Point} from '@influxdata/influxdb-client'
/** Get credentials from the environment **/
const url = process.env.INFLUX_URL
const token = process.env.INFLUX_TOKEN
const org = process.env.INFLUX_ORG
/**
* Instantiate a client with a configuration object
* that contains your InfluxDB URL and token.
**/
const influxDB = new InfluxDB({url, token})
/**
* Create a write client configured to write to the database.
* Provide your InfluxDB org and database.
**/
const writeApi = influxDB.getWriteApi(org, 'get-started')
/**
* Create a point and add tags and fields.
* To add a field, call the field method for your data type.
**/
const point1 = new Point('home')
.tag('room', 'Kitchen')
.floatField('temp', 72.0)
.floatField('hum', 20.2)
.intField('co', 9)
console.log(` ${point1}`)
/**
* Add the point to the batch.
**/
writeApi.writePoint(point1)
/**
* Flush pending writes in the batch from the buffer and close the write client.
**/
writeApi.close().then(() => {
console.log('WRITE FINISHED')
})
```
<!-- END NODE.JS SETUP SAMPLE -->
{{% /tab-content %}}
{{% tab-content %}}
<!-- BEGIN PYTHON SETUP SAMPLE -->
1. Create a file for your module--for example: `write-point.py`.
2. In `write-point.py`, enter the following sample code to write data in batching mode:
```python
import os
from influxdb_client_3 import InfluxDBClient3, Point, write_client_options, WritePrecision, WriteOptions, InfluxDBError
# Create an array of points with tags and fields.
points = [Point("home")
.tag("room", "Kitchen")
.field("temp", 25.3)
.field('hum', 20.2)
.field('co', 9)]
# With batching mode, define callbacks to execute after a successful or failed write request.
# Callback methods receive the configuration and data sent in the request.
def success(self, data: str):
print(f"Successfully wrote batch: data: {data}")
def error(self, data: str, exception: InfluxDBError):
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
def retry(self, data: str, exception: InfluxDBError):
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
# Configure options for batch writing.
write_options = WriteOptions(batch_size=500,
flush_interval=10_000,
jitter_interval=2_000,
retry_interval=5_000,
max_retries=5,
max_retry_delay=30_000,
exponential_base=2)
# Create an options dict that sets callbacks and WriteOptions.
wco = write_client_options(success_callback=success,
error_callback=error,
retry_callback=retry,
write_options=write_options)
# Instantiate a synchronous instance of the client with your
# InfluxDB credentials and write options.
with InfluxDBClient3(host=os.getenv('INFLUX_HOST'),
token=os.getenv('INFLUX_TOKEN'),
database=os.getenv('INFLUX_DATABASE'),
write_client_options=wco) as client:
client.write(points, write_precision='s')
```
<!-- END PYTHON SETUP PROJECT -->
{{% /tab-content %}}
{{< /tabs-wrapper >}}
The sample code does the following:
1. Instantiates a client configured with the InfluxDB URL and API token.
2. Uses the client to instantiate a **write client** with credentials.
3. Constructs a `Point` object with the [measurement](/influxdb/clustered/reference/glossary/#measurement) name (`"home"`).
4. Adds a tag and fields to the point.
5. Adds the point to a batch to be written to the database.
6. Sends the batch to InfluxDB and waits for the response.
7. Executes callbacks for the response, flushes the write buffer, and releases resources.
## Run the example
To run the sample and write the data to your InfluxDB Clustered database, enter the following command in your terminal:
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[Go](#)
[Node.js](#)
[Python](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
<!-- BEGIN GO RUN EXAMPLE -->
```sh
go run write-point.go
```
<!-- END GO RUN EXAMPLE -->
{{% /code-tab-content %}}
{{% code-tab-content %}}
<!-- BEGIN NODE.JS RUN EXAMPLE -->
```sh
node write-point.js
```
<!-- END NODE.JS RUN EXAMPLE -->
{{% /code-tab-content %}}
{{% code-tab-content %}}
<!-- BEGIN PYTHON RUN EXAMPLE -->
```sh
python write-point.py
```
<!-- END PYTHON RUN EXAMPLE -->
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
The example logs the point as line protocol to stdout, and then writes the point to the database.
The line protocol is similar to the following:
### Home sensor data line protocol
```sh
home,room=Kitchen co=9i,hum=20.2,temp=72 1641024000
```

@ -1,165 +0,0 @@
---
title: Use the influxctl CLI to write line protocol data
description: >
Use the [`influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/)
to write line protocol data to InfluxDB Clustered.
menu:
influxdb_clustered:
name: Use the influxctl CLI
parent: Write line protocol
identifier: write-influxctl
weight: 101
related:
- /influxdb/clustered/reference/cli/influxctl/write/
- /influxdb/clustered/reference/syntax/line-protocol/
- /influxdb/clustered/get-started/write/
---
Use the [`influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/)
to write line protocol data to {{< product-name >}}.
- [Construct line protocol](#construct-line-protocol)
- [Write the line protocol to InfluxDB](#write-the-line-protocol-to-influxdb)
## Construct line protocol
With a [basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
you can now construct line protocol and write data to InfluxDB.
Consider a use case where you collect data from sensors in your home.
Each sensor collects temperature, humidity, and carbon monoxide readings.
To collect this data, use the following schema:
- **measurement**: `home`
- **tags**
- `room`: Living Room or Kitchen
- **fields**
- `temp`: temperature in °C (float)
- `hum`: percent humidity (float)
- `co`: carbon monoxide in parts per million (integer)
- **timestamp**: Unix timestamp in _second_ precision
The following line protocol represents the schema described above:
```
home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000
home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000
home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600
home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600
home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200
home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200
home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800
home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800
home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400
home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400
home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000
home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000
```
For this tutorial, you can pass this line protocol directly to the
`influxctl write` command as a string or via `stdin`, or you can save it to a
file and read it from the file--for example, using the heredoc shown below.
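As a minimal sketch of the file option, save the sample line protocol with a heredoc (the `home.lp` filename is arbitrary; only the first two rows are shown here--include all of the sample rows above):
```sh
cat > home.lp <<'EOF'
home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000
home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000
EOF
```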
## Write the line protocol to InfluxDB
Use the [`influxctl write` command](/influxdb/clustered/reference/cli/influxctl/write/)
to write the [home sensor sample data](#home-sensor-data-line-protocol) to your
{{< product-name omit=" Clustered" >}} cluster.
Provide the following:
- The [database](/influxdb/clustered/admin/databases/) name using the `--database` flag
- A [database token](/influxdb/clustered/admin/tokens/#database-tokens) (with write permissions
on the target database) using the `--token` flag
- The timestamp precision as seconds (`s`) using the `--precision` flag
- [Line protocol](#construct-line-protocol).
Pass the line protocol in one of the following ways:
- a string on the command line
- a path to a file that contains the line protocol
- a single dash (`-`) to read the line protocol from stdin
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[string](#)
[file](#)
[stdin](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
{{% influxdb/custom-timestamps %}}
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
```sh
influxctl write \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
--precision s \
'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000
home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000
home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600
home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600
home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200
home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200
home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800
home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800
home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400
home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400
home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000
home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000'
```
{{% /code-placeholders %}}
{{% /influxdb/custom-timestamps %}}
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Name of the database to write to.
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
Database token with write permissions on the target database.
{{% /code-tab-content %}}
{{% code-tab-content %}}
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
```sh
influxctl write \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
--precision s \
LINE_PROTOCOL_FILEPATH
```
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Name of the database to write to.
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
Database token with write permissions on the target database.
- {{% code-placeholder-key %}}`LINE_PROTOCOL_FILEPATH`{{% /code-placeholder-key %}}:
File path to the file containing the line protocol. Can be an absolute file path
or relative to the current working directory.
{{% /code-tab-content %}}
{{% code-tab-content %}}
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
```sh
cat LINE_PROTOCOL_FILEPATH | influxctl write \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
--precision s \
-
```
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Name of the database to write to.
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
Database token with write permissions on the target database.
- {{% code-placeholder-key %}}`LINE_PROTOCOL_FILEPATH`{{% /code-placeholder-key %}}:
File path to the file containing the line protocol. Can be an absolute file path
or relative to the current working directory.
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}

@ -43,7 +43,7 @@ Each line of line protocol contains the following elements:
 {{< req type="key" >}}
-- {{< req "\*" >}} **measurement**: String that identifies the [measurement](/influxdb/clustered/reference/glossary/#measurement) to store the data in.
+- {{< req "\*" >}} **measurement**: A string that identifies the [table](/influxdb/clustered/reference/glossary/#table) to store the data in.
 - **tag set**: Comma-delimited list of key value pairs, each representing a tag.
   Tag keys and values are unquoted strings. _Spaces, commas, and equal characters must be escaped._
 - {{< req "\*" >}} **field set**: Comma-delimited list of key value pairs, each representing a field.

@ -0,0 +1,463 @@
---
title: Use InfluxDB client libraries to write line protocol data
description: >
Use InfluxDB API clients to write points as line protocol data to InfluxDB
Clustered.
menu:
influxdb_clustered:
name: Use client libraries
parent: Write line protocol
identifier: write-client-libs
weight: 103
related:
- /influxdb/clustered/reference/syntax/line-protocol/
- /influxdb/clustered/get-started/write/
---
Use InfluxDB client libraries to build time series points, and then write them
as line protocol to an {{% product-name %}} database.
- [Construct line protocol](#construct-line-protocol)
- [Example home schema](#example-home-schema)
- [Set up your project](#set-up-your-project)
- [Construct points and write line protocol](#construct-points-and-write-line-protocol)
## Construct line protocol
With a
[basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
you can construct line protocol data and write it to InfluxDB.
All InfluxDB client libraries write data in line protocol format to InfluxDB.
Client library `write` methods let you provide data as raw line protocol or as
`Point` objects that the client library converts to line protocol. If your
program creates the data you write to InfluxDB, use the client library `Point`
interface to take advantage of type safety in your program.
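As a minimal sketch of the difference--assuming the `influxdb3-python` client used later in this guide, whose `Point` class provides a `to_line_protocol()` helper, and using the example schema described below:
```python
from influxdb_client_3 import Point

# Raw line protocol: you handle escaping and type suffixes ("0i") yourself.
raw = 'home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641024000'

# Point interface: the client escapes values and infers field types.
point = (
    Point("home")
    .tag("room", "Kitchen")
    .field("temp", 22.5)
    .field("hum", 36.0)
    .field("co", 0)
)
print(point.to_line_protocol())
```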
### Example home schema
Consider a use case where you collect data from sensors in your home. Each
sensor collects temperature, humidity, and carbon monoxide readings.
To collect this data, use the following schema:
<!-- vale InfluxDataDocs.v3Schema = NO -->
- **measurement**: `home`
- **tags**
- `room`: Living Room or Kitchen
- **fields**
- `temp`: temperature in °C (float)
- `hum`: percent humidity (float)
- `co`: carbon monoxide in parts per million (integer)
- **timestamp**: Unix timestamp in _second_ precision
<!-- vale InfluxDataDocs.v3Schema = YES -->
The following example shows how to construct and write points that follow the
`home` schema.
## Set up your project
The examples in this guide assume you followed
[Set up InfluxDB](/influxdb/clustered/get-started/setup/) and
[Write data set up](/influxdb/clustered/get-started/write/#set-up-your-project-and-credentials)
instructions in [Get started](/influxdb/clustered/get-started/).
After setting up InfluxDB and your project, you should have the following:
- {{< product-name >}} credentials:
- [Database](/influxdb/clustered/admin/databases/)
- [Database token](/influxdb/clustered/admin/tokens/#database-tokens)
- Cluster hostname
- A directory for your project.
- Credentials stored as environment variables or in a project configuration
  file--for example, a `.env` ("dotenv") file like the sketch after this list.
- Client libraries installed for writing data to InfluxDB.
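For reference, a `.env` file for this guide might define the variables read by the sample code--a sketch only; the host value is a placeholder, and you replace each value with your own credentials:
```sh
# .env -- example values only; replace with your own credentials
INFLUX_HOST=https://cluster-host.com
INFLUX_TOKEN=DATABASE_TOKEN
INFLUX_DATABASE=DATABASE_NAME
```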
The following example shows how to construct `Point` objects that follow the
[example `home` schema](#example-home-schema), and then write the data as line
protocol to an {{% product-name %}} database.
The examples use InfluxDB v3 client libraries. For examples using InfluxDB v2
client libraries to write data to InfluxDB v3, see
[InfluxDB v2 clients](/influxdb/clustered/reference/client-libraries/v2/).
{{< tabs-wrapper >}} {{% tabs %}} [Go](#) [Node.js](#) [Python](#) {{% /tabs %}}
{{% tab-content %}}
The following steps set up a Go project using the
[InfluxDB v3 Go client](https://github.com/InfluxCommunity/influxdb3-go/):
<!-- BEGIN GO PROJECT SETUP -->
1. Install [Go 1.13 or later](https://golang.org/doc/install).
1. Create a directory for your Go module and change to the directory--for
example:
```sh
mkdir iot-starter-go && cd $_
```
1. Initialize a Go module--for example:
```sh
go mod init iot-starter
```
1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/),
which provides the InfluxDB `influxdb3` Go client library module.
```sh
go get github.com/InfluxCommunity/influxdb3-go
```
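   To confirm that the dependency was added to your module, you can list it--for example:
   ```sh
   go list -m github.com/InfluxCommunity/influxdb3-go
   ```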
<!-- END GO SETUP PROJECT -->
{{% /tab-content %}} {{% tab-content %}}
<!-- BEGIN NODE.JS PROJECT SETUP -->
The following steps set up a JavaScript project using the
[InfluxDB v3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/).
1. Install [Node.js](https://nodejs.org/en/download/).
1. Create a directory for your JavaScript project and change to the
directory--for example:
```sh
mkdir -p iot-starter-js && cd $_
```
1. Initialize a project--for example, using `npm`:
<!-- pytest.mark.skip -->
```sh
npm init
```
1. Install the `@influxdata/influxdb3-client` InfluxDB v3 JavaScript client
library.
```sh
npm install @influxdata/influxdb3-client
```
<!-- END NODE.JS SETUP PROJECT -->
{{% /tab-content %}} {{% tab-content %}}
<!-- BEGIN PYTHON SETUP PROJECT -->
The following steps set up a Python project using the
[InfluxDB v3 Python client](https://github.com/InfluxCommunity/influxdb3-python/):
1. Install [Python](https://www.python.org/downloads/).
1. Inside of your project directory, create a directory for your Python module
and change to the module directory--for example:
```sh
mkdir -p iot-starter-py && cd $_
```
1. **Optional, but recommended**: Use
[`venv`](https://docs.python.org/3/library/venv.html) or
[`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual
environment for installing and executing code--for example, enter the
following command using `venv` to create and activate a virtual environment
for the project:
```bash
python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate
```
1. Install
[`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python),
which provides the InfluxDB `influxdb_client_3` Python client library module
and also installs the
[`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for
working with Arrow data.
```sh
pip install influxdb3-python
```
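   To quickly verify the installation, check that the module imports--for example:
   ```sh
   python -c "import influxdb_client_3; print('influxdb_client_3 imported OK')"
   ```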
<!-- END PYTHON SETUP PROJECT -->
{{% /tab-content %}} {{< /tabs-wrapper >}}
## Construct points and write line protocol
Client libraries provide one or more `Point` constructor methods. Some libraries
support language-native data structures, such as Go's `struct`, for creating
points.
{{< tabs-wrapper >}} {{% tabs %}} [Go](#) [Node.js](#) [Python](#) {{% /tabs %}}
{{% tab-content %}}
<!-- BEGIN GO SETUP SAMPLE -->
1. Create a file for your module--for example: `main.go`.
1. In `main.go`, enter the following sample code:
```go
package main
import (
"context"
"os"
"fmt"
"time"
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
"github.com/influxdata/line-protocol/v2/lineprotocol"
)
func Write() error {
url := os.Getenv("INFLUX_HOST")
token := os.Getenv("INFLUX_TOKEN")
database := os.Getenv("INFLUX_DATABASE")
// To instantiate a client, call New() with InfluxDB credentials.
client, err := influxdb3.New(influxdb3.ClientConfig{
Host: url,
Token: token,
Database: database,
})
// Return early if the client couldn't be created.
if err != nil {
return err
}
/** Use a deferred function to ensure the client is closed when the
* function returns.
**/
defer func (client *influxdb3.Client) {
err = client.Close()
if err != nil {
panic(err)
}
}(client)
/** Use the NewPoint method to construct a point.
* NewPoint(measurement, tags map, fields map, time)
**/
point := influxdb3.NewPoint("home",
map[string]string{
"room": "Living Room",
},
map[string]any{
"temp": 24.5,
"hum": 40.5,
"co": 15i},
time.Now(),
)
/** Use the NewPointWithMeasurement method to construct a point with
* method chaining.
**/
point2 := influxdb3.NewPointWithMeasurement("home").
SetTag("room", "Living Room").
SetField("temp", 23.5).
SetField("hum", 38.0).
SetField("co", 16i).
SetTimestamp(time.Now())
fmt.Println("Writing points")
points := []*influxdb3.Point{point, point2}
/** Write points to InfluxDB.
* You can specify WriteOptions, such as Gzip threshold,
* default tags, and timestamp precision. Default precision is lineprotocol.Nanosecond
**/
err = client.WritePoints(context.Background(), points,
influxdb3.WithPrecision(lineprotocol.Second))
return err
}
func main() {
if err := Write(); err != nil {
fmt.Println("write failed:", err)
}
}
```
1. To run the module and write the data to your {{% product-name %}} database,
enter the following command in your terminal:
<!-- pytest.mark.skip -->
```sh
go run main.go
```
<!-- END GO SAMPLE -->
{{% /tab-content %}} {{% tab-content %}}
<!-- BEGIN NODE.JS SETUP SAMPLE -->
1. Create a file for your module--for example: `write-points.js`.
1. In `write-points.js`, enter the following sample code:
```js
// write-points.js
import { InfluxDBClient, Point } from '@influxdata/influxdb3-client';
/**
* Set InfluxDB credentials.
*/
const host = process.env.INFLUX_HOST ?? '';
const database = process.env.INFLUX_DATABASE;
const token = process.env.INFLUX_TOKEN;
/**
* Write line protocol to InfluxDB using the JavaScript client library.
*/
export async function writePoints() {
/**
* Instantiate an InfluxDBClient.
* Provide the host URL and the database token.
*/
const client = new InfluxDBClient({ host, token });
/** Use the fluent interface with chained methods to construct Points. */
const point = Point.measurement('home')
.setTag('room', 'Living Room')
.setFloatField('temp', 22.2)
.setFloatField('hum', 35.5)
.setIntegerField('co', 7)
.setTimestamp(Math.floor(new Date().getTime() / 1000));
const point2 = Point.measurement('home')
.setTag('room', 'Kitchen')
.setFloatField('temp', 21.0)
.setFloatField('hum', 35.9)
.setIntegerField('co', 0)
.setTimestamp(Math.floor(new Date().getTime() / 1000));
/** Write points to InfluxDB.
* The write method accepts an array of points, the target database, and
* an optional configuration object.
* You can specify WriteOptions, such as Gzip threshold, default tags,
* and timestamp precision. Default precision is lineprotocol.Nanosecond
**/
try {
await client.write([point, point2], database, '', { precision: 's' });
console.log('Data has been written successfully!');
} catch (error) {
console.error(`Error writing data to InfluxDB: ${error.body}`);
}
client.close();
}
writePoints();
```
1. To run the module and write the data to your {{< product-name >}} database,
enter the following command in your terminal:
<!-- pytest.mark.skip -->
```sh
node write-points.js
```
<!-- END NODE.JS SAMPLE -->
{{% /tab-content %}} {{% tab-content %}}
<!-- BEGIN PYTHON SETUP SAMPLE -->
1. Create a file for your module--for example: `write-points.py`.
1. In `write-points.py`, enter the following sample code to write data in
batching mode:
```python
import os
from influxdb_client_3 import (
InfluxDBClient3, InfluxDBError, Point, WritePrecision,
WriteOptions, write_client_options)
host = os.getenv('INFLUX_HOST')
token = os.getenv('INFLUX_TOKEN')
database = os.getenv('INFLUX_DATABASE')
# Create an array of points with tags and fields.
points = [Point("home")
.tag("room", "Kitchen")
.field("temp", 25.3)
.field('hum', 20.2)
.field('co', 9)]
# With batching mode, define callbacks to execute after a successful or
# failed write request.
# Callback methods receive the configuration and data sent in the request.
def success(self, data: str):
print(f"Successfully wrote batch: data: {data}")
def error(self, data: str, exception: InfluxDBError):
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
def retry(self, data: str, exception: InfluxDBError):
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
# Configure options for batch writing.
write_options = WriteOptions(batch_size=500,
flush_interval=10_000,
jitter_interval=2_000,
retry_interval=5_000,
max_retries=5,
max_retry_delay=30_000,
exponential_base=2)
# Create an options dict that sets callbacks and WriteOptions.
wco = write_client_options(success_callback=success,
error_callback=error,
retry_callback=retry,
write_options=write_options)
# Instantiate a synchronous instance of the client with your
# InfluxDB credentials and write options, such as Gzip threshold, default tags,
# and timestamp precision. Default precision is nanosecond ('ns').
with InfluxDBClient3(host=host,
token=token,
database=database,
write_client_options=wco) as client:
client.write(points, write_precision='s')
```
1. To run the module and write the data to your {{< product-name >}} database,
enter the following command in your terminal:
<!-- pytest.mark.skip -->
```sh
python write-points.py
```
<!-- END PYTHON SETUP PROJECT -->
{{% /tab-content %}} {{< /tabs-wrapper >}}
The sample code does the following:
<!-- vale InfluxDataDocs.v3Schema = NO -->
1. Instantiates a client configured with the InfluxDB URL and API token.
1. Constructs `Point` objects with the `home`
   [measurement](/influxdb/clustered/reference/glossary/#measurement).
1. Sends data as line protocol format to InfluxDB and waits for the response.
1. If the write succeeds, logs the success message to stdout; otherwise, logs
the failure message and error details.
1. Closes the client to release resources.
<!-- vale InfluxDataDocs.v3Schema = YES -->

@ -5,7 +5,7 @@
"description": "InfluxDB documentation", "description": "InfluxDB documentation",
"license": "MIT", "license": "MIT",
"devDependencies": { "devDependencies": {
"@vvago/vale": "^3.0.7", "@vvago/vale": "^3.4.2",
"autoprefixer": ">=10.2.5", "autoprefixer": ">=10.2.5",
"hugo-extended": ">=0.101.0", "hugo-extended": ">=0.101.0",
"husky": "^9.0.11", "husky": "^9.0.11",
@ -20,13 +20,14 @@
}, },
"scripts": { "scripts": {
"prepare": "husky", "prepare": "husky",
"test": "./test.sh" "lint-vale": ".ci/vale/vale.sh",
"lint-staged": "lint-staged --relative"
}, },
"lint-staged": { "main": "index.js",
"*.{js,css,md}": "prettier --write", "module": "main.js",
"content/influxdb/cloud-dedicated/**/*.md": "npx vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error --output=line", "directories": {
"content/influxdb/cloud-serverless/**/*.md": "npx vale --config=content/influxdb/cloud-serverless/.vale.ini --minAlertLevel=error --output=line", "test": "test"
"content/influxdb/clustered/**/*.md": "npx vale --config=content/influxdb/clustered/.vale.ini --minAlertLevel=error --output=line", },
"content/influxdb/{cloud,v2,telegraf}/**/*.md": "npx vale --config=.vale.ini --minAlertLevel=error --output=line" "keywords": [],
} "author": ""
} }

@ -1,94 +0,0 @@
# If you need more help, visit the Dockerfile reference guide at
# https://docs.docker.com/engine/reference/builder/
# Starting from a Go base image is easier than setting up the Go environment later.
FROM golang:latest
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
curl \
git \
gpg \
jq \
maven \
nodejs \
npm \
wget
# Install test runner dependencies
RUN apt-get install -y \
python3 \
python3-pip \
python3-venv
RUN ln -s /usr/bin/python3 /usr/bin/python
# Create a virtual environment for Python to avoid conflicts with the system Python and having to use the --break-system-packages flag when installing packages with pip.
RUN python -m venv /opt/venv
# Enable venv
ENV PATH="/opt/venv/bin:$PATH"
# Prevents Python from writing pyc files.
ENV PYTHONDONTWRITEBYTECODE=1
# Keeps Python from buffering stdout and stderr to avoid situations where
# the application crashes without emitting any logs due to buffering.
ENV PYTHONUNBUFFERED=1
# RUN --mount=type=cache,target=/root/.cache/node_modules \
# --mount=type=bind,source=package.json,target=package.json \
# npm install
# Copy docs test directory to the image.
WORKDIR /usr/src/app
RUN chmod -R 755 .
ARG SOURCE_DIR
COPY data ./data
# Install parse_yaml.sh and parse YAML config files into dotenv files to be used by tests.
RUN /bin/bash -c 'curl -sO https://raw.githubusercontent.com/mrbaseman/parse_yaml/master/src/parse_yaml.sh'
RUN /bin/bash -c 'source ./parse_yaml.sh && parse_yaml ./data/products.yml > .env.products'
COPY test ./test
WORKDIR /usr/src/app/test
COPY shared/fixtures ./tmp/data
# Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't
# available as packages in apt-cache, so use pip to download dependencies in a
# separate step and use Docker's caching.
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
# Leverage a bind mount to requirements.txt to avoid having to copy them into
# this layer.
RUN --mount=type=cache,target=/root/.cache/pip \
--mount=type=bind,source=test/requirements.txt,target=requirements.txt \
pip install -Ur requirements.txt
COPY test/setup/run-tests.sh /usr/local/bin/run-tests.sh
RUN chmod +x /usr/local/bin/run-tests.sh
# Install Telegraf for use in tests.
# Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker).
# influxdata-archive_compat.key GPG fingerprint:
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
RUN apt-get update && apt-get install -y telegraf
# Install influx v2 Cloud CLI for use in tests.
# Follow the install instructions (https://portal.influxdata.com/downloads/), except for sudo (which isn't available in Docker).
# influxdata-archive_compat.key GPG fingerprint:
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
RUN apt-get update && apt-get install -y influxdb2-cli
ENV TEMP_DIR=./tmp
ENTRYPOINT [ "run-tests.sh" ]
CMD [""]

test.sh

@ -1,66 +0,0 @@
#! /bin/bash
# Path: test.sh
# Description:
# This script copies content files for testing and runs tests on those temporary copies.
# The temporary files are shared between the host and the Docker container
# using a bind mount configured in compose.yaml.
#
# Docker compose now has an experimental file watch feature
# (https://docs.docker.com/compose/file-watch/) that is likely preferable to the
# strategy here.
#
# Usage:
# The default behavior is to test all *.md files that have been added or modified in the current branch, effectively:
#
# `git diff --name-only --diff-filter=AM --relative master | grep -E '\.md$' | ./test.sh`
#
# To specify files to test, in your terminal command line, pass a file pattern as the only argument to the script--for example:
#
# sh test.sh ./content/**/*.md
##
paths="$1"
target=./test/tmp
testrun=./test/.test-run.txt
mkdir -p "$target"
cat /dev/null > "$testrun"
rm -rf "$target"/*
# Check if the user provided a path to copy.
if [ -z "$paths" ]; then
echo "No path provided. Running tests for *.md files that have been added or modified in the current branch."
paths=$(git diff --name-only --diff-filter=AM HEAD | \
grep -E '\.md$')
if [ -z "$paths" ]; then
echo "No files found for pattern: $paths"
exit 1
fi
else
paths=$(find "$paths" -type f -name '*.md')
fi
# Log the list of files to be tested and copy them to the test directory.
echo "$paths" >> "$testrun"
echo "$paths" | rsync -arv --files-from=- . "$target"
# Build or rebuild a service if the Dockerfile or build directory have changed, and then run the tests.
docker compose up test
# Troubleshoot tests
# If you want to examine files or run commands for debugging tests,
# start the container and use `exec` to open an interactive shell--for example:
# docker compose run -it --entrypoint=/bin/bash test
# To build and run a new container and debug test failures, use `docker compose run` which runs a one-off command in a new container. Pass additional flags to be used by the container's entrypoint and the test runners it executes--for example:
# docker compose run --rm test -v
# docker compose run --rm test --entrypoint /bin/bash
# Or, pass the flags in the compose file--for example:
# services:
# test:
# build:...
# command: ["-vv"]

@ -8,9 +8,10 @@
 **/__pycache__
 **/.venv
 **/.classpath
+**/.config.toml
 **/.dockerignore
 **/.env
-**/.env.influxdbv3
+**/.env.*
 **/.git
 **/.gitignore
 **/.project
@ -23,6 +24,7 @@
 **/*.jfm
 **/bin
 **/charts
+**/config.toml
 **/docker-compose*
 **/compose*
 **/Dockerfile*

test/.gitignore

@ -1,8 +1,11 @@
 /target
 /Cargo.lock
+config.toml
 content
 node_modules
 tmp
+.config*
 .env*
+**/.env.test
 .pytest_cache
 .test-run.txt

@ -1,116 +0,0 @@
#!/bin/bash
# This script is used to run tests for the InfluxDB documentation.
# The script is designed to be run in a Docker container. It is used to substitute placeholder values.
# Function to check if an option is present in the arguments
has_option() {
local target="$1"
shift
for arg in "$@"; do
if [ "$arg" == "$target" ]; then
return 0
fi
done
return 1
}
verbose=0
# Check if "--option" is present in the CMD arguments
if has_option "-v" "$@"; then
verbose=1
echo "Using verbose mode..."
fi
BASE_DIR=$(pwd)
cd $TEMP_DIR
for file in `find . -type f \( -iname '*.md' \)` ; do
if [ -f "$file" ]; then
echo "PRETEST: substituting values in $file"
# Replaces placeholder values with environment variable references.
# Non-language-specific replacements.
sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g;
' $file
# Python-specific replacements.
# Use f-strings to identify placeholders in Python while also keeping valid syntax if
# the user replaces the value.
# Remember to import os for your example code.
sed -i 's/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g;
s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g;
s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g;
s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g;
' $file
# Shell-specific replacements.
## In JSON Heredoc
sed -i 's|"orgID": "ORG_ID"|"orgID": "$INFLUX_ORG"|g;
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \
$file
sed -i 's/API_TOKEN/$INFLUX_TOKEN/g;
s/ORG_ID/$INFLUX_ORG/g;
s/DATABASE_TOKEN/$INFLUX_TOKEN/g;
s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g;
s/BUCKET_NAME/$INFLUX_DATABASE/g;
s/DATABASE_NAME/$INFLUX_DATABASE/g;
s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g;
s/get-started/$INFLUX_DATABASE/g;
s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \
$file
# v2-specific replacements.
sed -i 's|https:\/\/us-west-2-1.aws.cloud2.influxdata.com|$INFLUX_HOST|g;
s|{{< latest-patch >}}|${influxdb_latest_patches_v2}|g;
s|{{< latest-patch cli=true >}}|${influxdb_latest_cli_v2}|g;' \
$file
# Skip package manager commands.
sed -i 's|sudo dpkg.*$||g;
s|sudo yum.*$||g;' \
$file
# Environment-specific replacements.
sed -i 's|sudo ||g;' \
$file
fi
if [ $verbose -eq 1 ]; then
echo "FILE CONTENTS:"
cat $file
fi
done
# Miscellaneous test setup.
# For macOS samples.
mkdir -p ~/Downloads && rm -rf ~/Downloads/*
# Clean up installed files from previous runs.
gpg -q --batch --yes --delete-key D8FF8E1F7DF8B07E > /dev/null 2>&1
# Activate the Python virtual environment configured in the Dockerfile.
. /opt/venv/bin/activate
# List installed Python dependencies.
pip list
# Run test commands with options provided in the CMD of the Dockerfile.
# pytest rootdir is the directory where pytest.ini is located (/test).
if [ -d ./content/influxdb/cloud-dedicated/ ]; then
echo "Running content/influxdb/cloud-dedicated tests..."
pytest --codeblocks --envfile $BASE_DIR/.env.dedicated ./content/influxdb/cloud-dedicated/ $@
fi
if [ -d ./content/influxdb/cloud-serverless/ ]; then
echo "Running content/influxdb/cloud-serverless tests..."
pytest --codeblocks --envfile $BASE_DIR/.env.serverless ./content/influxdb/cloud-serverless/ $@
fi
if [ -d ./content/telegraf/ ]; then
echo "Running content/telegraf tests..."
pytest --codeblocks --envfile $BASE_DIR/.env.telegraf ./content/telegraf/ $@
fi

test/src/prepare-content.sh

@ -0,0 +1,105 @@
#!/bin/bash
# This script is used to run tests for the InfluxDB documentation.
# The script is designed to be run in a Docker container. It is used to substitute placeholder values in test files.
TEST_CONTENT="/app/content"
function substitute_placeholders {
for file in `find "$TEST_CONTENT" -type f \( -iname '*.md' \)`; do
if [ -f "$file" ]; then
# echo "PRETEST: substituting values in $file"
# Replaces placeholder values with environment variable references.
# Non-language-specific replacements.
sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g;
' $file
# Python-specific replacements.
# Use f-strings to identify placeholders in Python while also keeping valid syntax if
# the user replaces the value.
# Remember to import os for your example code.
sed -i 's/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g;
s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g;
s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g;
s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g;
' $file
# Shell-specific replacements.
## In JSON Heredoc
sed -i 's|"orgID": "ORG_ID"|"orgID": "$INFLUX_ORG"|g;
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \
$file
sed -i 's/API_TOKEN/$INFLUX_TOKEN/g;
s/ORG_ID/$INFLUX_ORG/g;
s/DATABASE_TOKEN/$INFLUX_TOKEN/g;
s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g;
s/BUCKET_NAME/$INFLUX_DATABASE/g;
s/DATABASE_NAME/$INFLUX_DATABASE/g;
s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g;
s/get-started/$INFLUX_DATABASE/g;
s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \
$file
# v2-specific replacements.
sed -i 's|https:\/\/us-west-2-1.aws.cloud2.influxdata.com|$INFLUX_HOST|g;
s|{{< latest-patch >}}|${influxdb_latest_patches_v2}|g;
s|{{< latest-patch cli=true >}}|${influxdb_latest_cli_v2}|g;' \
$file
# Skip package manager commands.
sed -i 's|sudo dpkg.*$||g;
s|sudo yum.*$||g;' \
$file
# Environment-specific replacements.
sed -i 's|sudo ||g;' \
$file
fi
done
}
setup() {
# Parse YAML config files into dotenv files to be used by tests.
parse_yaml /app/appdata/products.yml > /app/appdata/.env.products
# Miscellaneous test setup.
# For macOS samples.
mkdir -p ~/Downloads && rm -rf ~/Downloads/*
}
prepare_tests() {
TEST_FILES="$*"
# Remove files from the previous run.
rm -rf "$TEST_CONTENT"/*
# Copy the test files to the target directory while preserving the directory structure.
for FILE in $TEST_FILES; do
# Create the parent directories of the destination file
#mkdir -p "$(dirname "$TEST_TARGET/$FILE")"
# Copy the file
rsync -avz --relative --log-file=./test.log "$FILE" /app/
done
substitute_placeholders
}
# If arguments were passed and the first argument is not --files, run the command. This is useful for running "/bin/bash" for debugging the container.
# If --files is passed, prepare all remaining arguments as test files.
# Otherwise (no arguments), run the setup function and return existing files to be tested.
if [ "$1" != "--files" ]; then
echo "Executing $0 without --files argument."
"$@"
fi
if [ "$1" == "--files" ]; then
shift
prepare_tests "$@"
fi
setup
# Return new or existing files to be tested.
find "$TEST_CONTENT" -type f -name '*.md'

yarn.lock
File diff suppressed because it is too large.