chore(ci): Improve CI lint and test runners
- Reconfigures prettier linting. - Adds .editorconfig to help with consistent editor settings - Refactors test runs: - Removes test configuration from compose.yaml (not suited for this use case). - Splits test runner into test content setup and pytest that can be run separately or together (and with other test runners in the future). - Configuration is in Dockerfiles and command line (`.lintstagedrc.mjs`) - Updates CONTRIBUTING.md - Updates client library write examples in cloud-dedicated and clustered.pull/5503/head
parent
37dd3eaa8d
commit
5c74f013a1
|
@ -0,0 +1,6 @@
|
|||
charset = utf-8
|
||||
insert_final_newline = true
|
||||
end_of_line = lf
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
max_line_length = 80
|
|
@ -3,6 +3,8 @@
|
|||
public
|
||||
.*.swp
|
||||
node_modules
|
||||
.config*
|
||||
**/.env*
|
||||
*.log
|
||||
/resources
|
||||
.hugo_build.lock
|
||||
|
@ -10,4 +12,6 @@ node_modules
|
|||
/api-docs/redoc-static.html*
|
||||
.vscode/*
|
||||
.idea
|
||||
config.toml
|
||||
package-lock.json
|
||||
tmp
|
|
@ -1,2 +1 @@
|
|||
npx lint-staged --relative --verbose
|
||||
yarn run test
|
||||
npx lint-staged --relative
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
// Lint-staged configuration. This file must export a lint-staged configuration object.
|
||||
|
||||
function lintStagedContent(paths, productPath) {
|
||||
const name = `staged-${productPath.replace(/\//g, '-')}`;
|
||||
|
||||
return [
|
||||
`prettier --write ${paths.join(' ')}`,
|
||||
|
||||
`docker build . -f Dockerfile.tests -t influxdata-docs/tests:latest`,
|
||||
|
||||
// Remove any existing test container.
|
||||
`docker rm -f ${name} || true`,
|
||||
|
||||
`docker run --name ${name} --mount type=volume,target=/app/content --mount type=bind,src=./content,dst=/src/content --mount type=bind,src=./static/downloads,dst=/app/data
|
||||
influxdata-docs/tests --files "${paths.join(' ')}"`,
|
||||
|
||||
`docker build . -f Dockerfile.pytest -t influxdata-docs/pytest:latest`,
|
||||
|
||||
// Run test runners. If tests fail, the container will be removed,
|
||||
//but the "test-" container will remain until the next run.
|
||||
`docker run --env-file ${productPath}/.env.test
|
||||
--volumes-from ${name} --rm
|
||||
influxdata-docs/pytest --codeblocks ${productPath}/`
|
||||
];
|
||||
}
|
||||
|
||||
export default {
|
||||
"*.{js,css}": paths => `prettier --write ${paths.join(' ')}`,
|
||||
|
||||
// Don't let prettier check or write Markdown files for now;
|
||||
// it indents code blocks within list items, which breaks Hugo's rendering.
|
||||
// "*.md": paths => `prettier --check ${paths.join(' ')}`,
|
||||
|
||||
"content/influxdb/cloud-dedicated/**/*.md":
|
||||
paths => lintStagedContent(paths, 'content/influxdb/cloud-dedicated'),
|
||||
"content/influxdb/clustered/**/*.md":
|
||||
paths => lintStagedContent(paths, 'content/influxdb/clustered'),
|
||||
|
||||
// "content/influxdb/cloud-serverless/**/*.md": "docker compose run -T lint --config=content/influxdb/cloud-serverless/.vale.ini --minAlertLevel=error",
|
||||
|
||||
// "content/influxdb/clustered/**/*.md": "docker compose run -T lint --config=content/influxdb/clustered/.vale.ini --minAlertLevel=error",
|
||||
|
||||
// "content/influxdb/{cloud,v2,telegraf}/**/*.md": "docker compose run -T lint --config=.vale.ini --minAlertLevel=error"
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
# Ignore Prettier checking for files
|
||||
**/.git
|
||||
**/.svn
|
||||
**/.hg
|
||||
**/node_modules
|
|
@ -1,4 +1,14 @@
|
|||
trailingComma: "es5"
|
||||
tabWidth: 2
|
||||
# ~/.prettierrc.yaml
|
||||
printWidth: 80
|
||||
semi: true
|
||||
singleQuote: true
|
||||
tabWidth: 2
|
||||
trailingComma: "es5"
|
||||
useTabs: false
|
||||
overrides:
|
||||
- files:
|
||||
- "*.md"
|
||||
- "*.markdown"
|
||||
options:
|
||||
proseWrap: "preserve"
|
||||
# Prettier also uses settings, such as indent, specified in .editorconfig
|
369
CONTRIBUTING.md
369
CONTRIBUTING.md
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,54 @@
|
|||
FROM golang:latest
|
||||
|
||||
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
||||
curl \
|
||||
git \
|
||||
gpg \
|
||||
jq \
|
||||
maven \
|
||||
nodejs \
|
||||
npm \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-venv \
|
||||
wget
|
||||
|
||||
RUN ln -s /usr/bin/python3 /usr/bin/python
|
||||
|
||||
# Create a virtual environment for Python to avoid conflicts with the system Python and having to use the --break-system-packages flag when installing packages with pip.
|
||||
RUN python -m venv /opt/venv
|
||||
# Enable venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
# Prevents Python from writing pyc files.
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
# the application crashes without emitting any logs due to buffering.
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't
|
||||
# available as packages in apt-cache, so use pip to download dependencies in a # separate step and use Docker's caching.
|
||||
COPY ./test/src/pytest.ini pytest.ini
|
||||
COPY ./test/src/requirements.txt requirements.txt
|
||||
RUN pip install -Ur requirements.txt
|
||||
|
||||
# Activate the Python virtual environment configured in the Dockerfile.
|
||||
RUN . /opt/venv/bin/activate
|
||||
|
||||
### Install InfluxDB clients for testing
|
||||
# Install InfluxDB keys to verify client installs.
|
||||
# Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker).
|
||||
# influxdata-archive_compat.key GPG fingerprint:
|
||||
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
|
||||
ADD https://repos.influxdata.com/influxdata-archive_compat.key ./influxdata-archive_compat.key
|
||||
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
|
||||
|
||||
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
|
||||
|
||||
# Install InfluxDB clients to use in tests.
|
||||
RUN apt-get update && apt-get -y install telegraf influxdb2-cli influxctl
|
||||
COPY --chmod=755 ./test/config.toml /root/.config/influxctl/config.toml
|
||||
### End InfluxDB client installs
|
||||
|
||||
ENTRYPOINT [ "pytest" ]
|
||||
CMD [ "" ]
|
|
@ -0,0 +1,18 @@
|
|||
# Use the Dockerfile 1.2 syntax to leverage BuildKit features like cache mounts and inline mounts--temporary mounts that are only available during the build step, not at runtime.
|
||||
# syntax=docker/dockerfile:1.2
|
||||
|
||||
# Starting from a Go base image is easier than setting up the Go environment later.
|
||||
FROM python:3.9-slim
|
||||
|
||||
# Install the necessary packages for the test environment.
|
||||
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
||||
rsync
|
||||
|
||||
COPY --chmod=755 ./test/src/parse_yaml.sh /usr/local/bin/parse_yaml
|
||||
COPY --chmod=755 ./test/src/prepare-content.sh /usr/local/bin/prepare-content
|
||||
COPY ./data/products.yml /app/appdata/products.yml
|
||||
|
||||
WORKDIR /src
|
||||
ENTRYPOINT [ "prepare-content" ]
|
||||
# The default command is an empty string to pass all command line arguments to the entrypoint and allow the entrypoint to run.
|
||||
CMD [ "" ]
|
44
compose.yaml
44
compose.yaml
|
@ -1,27 +1,39 @@
|
|||
# This is a Docker Compose file for the InfluxData documentation site.
|
||||
## Run documentation tests for code samples.
|
||||
name: influxdata-docs
|
||||
volumes:
|
||||
test-content:
|
||||
services:
|
||||
test:
|
||||
image: docs-v2-tests
|
||||
container_name: docs-v2-tests
|
||||
markdownlint:
|
||||
image: davidanson/markdownlint-cli2:v0.13.0
|
||||
container_name: markdownlint
|
||||
profiles:
|
||||
- test
|
||||
- ci
|
||||
- lint
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./test
|
||||
target: /usr/src/app/test
|
||||
- type: bind
|
||||
source: ./data
|
||||
target: /usr/src/app/test/data
|
||||
- type: bind
|
||||
source: ./static/downloads
|
||||
target: /usr/src/app/test/tmp/data
|
||||
source: .
|
||||
target: /workdir
|
||||
working_dir: /workdir
|
||||
build:
|
||||
context: .
|
||||
dockerfile: test.Dockerfile
|
||||
args:
|
||||
- SOURCE_DIR=test
|
||||
- DOCKER_IMAGE=docs-v2-tests
|
||||
vale:
|
||||
image: jdkato/vale:latest
|
||||
container_name: vale
|
||||
profiles:
|
||||
- ci
|
||||
- lint
|
||||
volumes:
|
||||
- type: bind
|
||||
source: .
|
||||
target: /workdir
|
||||
working_dir: /workdir
|
||||
entrypoint: ["/bin/vale"]
|
||||
build:
|
||||
context: .
|
||||
dockerfile_inline: |
|
||||
COPY .ci /src/.ci
|
||||
COPY **/.vale.ini /src/
|
||||
## Run InfluxData documentation with the hugo development server on port 1313.
|
||||
## For more information about the hugomods/hugo image, see
|
||||
## https://docker.hugomods.com/docs/development/docker-compose/
|
||||
|
|
|
@ -10,16 +10,16 @@ weight: 3
|
|||
influxdb/cloud-dedicated/tags: [get-started]
|
||||
---
|
||||
|
||||
{{% product-name %}} is the platform purpose-built to collect, store, and
|
||||
query time series data.
|
||||
It is powered by the InfluxDB 3.0 storage engine which provides a number of
|
||||
benefits including nearly unlimited series cardinality, improved query performance,
|
||||
and interoperability with widely used data processing tools and platforms.
|
||||
InfluxDB is the platform purpose-built to collect, store, and query
|
||||
time series data.
|
||||
{{% product-name %}} is powered by the InfluxDB 3.0 storage engine, that
|
||||
provides nearly unlimited series cardinality,
|
||||
improved query performance, and interoperability with widely used data
|
||||
processing tools and platforms.
|
||||
|
||||
**Time series data** is a sequence of data points indexed in time order.
|
||||
Data points typically consist of successive measurements made from the same
|
||||
source and are used to track changes over time.
|
||||
Examples of time series data include:
|
||||
**Time series data** is a sequence of data points indexed in time order. Data
|
||||
points typically consist of successive measurements made from the same source
|
||||
and are used to track changes over time. Examples of time series data include:
|
||||
|
||||
- Industrial sensor data
|
||||
- Server performance metrics
|
||||
|
@ -28,14 +28,14 @@ Examples of time series data include:
|
|||
- Rainfall measurements
|
||||
- Stock prices
|
||||
|
||||
This multi-part tutorial walks you through writing time series data to {{% product-name %}},
|
||||
querying, and then visualizing that data.
|
||||
This multi-part tutorial walks you through writing time series data to
|
||||
{{% product-name %}}, querying, and then visualizing that data.
|
||||
|
||||
## Key concepts before you get started
|
||||
|
||||
Before you get started using InfluxDB, it's important to understand how time series
|
||||
data is organized and stored in InfluxDB and some key definitions that are used
|
||||
throughout this documentation.
|
||||
Before you get started using InfluxDB, it's important to understand how time
|
||||
series data is organized and stored in InfluxDB and some key definitions that
|
||||
are used throughout this documentation.
|
||||
|
||||
- [Data organization](#data-organization)
|
||||
- [Schema on write](#schema-on-write)
|
||||
|
@ -44,43 +44,53 @@ throughout this documentation.
|
|||
### Data organization
|
||||
|
||||
The {{% product-name %}} data model organizes time series data into databases
|
||||
and measurements.
|
||||
and tables.
|
||||
|
||||
A database can contain multiple measurements.
|
||||
Measurements contain multiple tags and fields.
|
||||
A database can contain multiple tables.
|
||||
Tables contain multiple tags and fields.
|
||||
|
||||
- **Database**: Named location where time series data is stored.
|
||||
A database can contain multiple _measurements_.
|
||||
- **Measurement**: Logical grouping for time series data.
|
||||
All _points_ in a given measurement should have the same _tags_.
|
||||
A measurement contains multiple _tags_ and _fields_.
|
||||
- **Tags**: Key-value pairs that provide metadata for each point--for example,
|
||||
something to identify the source or context of the data like host,
|
||||
location, station, etc.
|
||||
Tag values may be null.
|
||||
- **Fields**: Key-value pairs with values that change over time--for example,
|
||||
temperature, pressure, stock price, etc.
|
||||
Field values may be null, but at least one field value is not null on any given row.
|
||||
- **Timestamp**: Timestamp associated with the data.
|
||||
When stored on disk and queried, all data is ordered by time.
|
||||
A timestamp is never null.
|
||||
- **Database**: A named location where time series data is stored in _tables_.
|
||||
_Database_ is synonymous with _bucket_ in InfluxDB Cloud Serverless and InfluxDB TSM.
|
||||
- **Table**: A logical grouping for time series data. All _points_ in a given
|
||||
table should have the same _tags_. A table contains _tags_ and
|
||||
_fields_. _Table_ is synonymous with _measurement_ in InfluxDB Cloud
|
||||
Serverless and InfluxDB TSM.
|
||||
- **Tags**: Key-value pairs that provide metadata for each point--for
|
||||
example, something to identify the source or context of the data like
|
||||
host, location, station, etc. Tag values may be null.
|
||||
- **Fields**: Key-value pairs with values that change over time--for
|
||||
example, temperature, pressure, stock price, etc. Field values may be
|
||||
null, but at least one field value is not null on any given row.
|
||||
- **Timestamp**: Timestamp associated with the data. When stored on disk and
|
||||
queried, all data is ordered by time. A timestamp is never null.
|
||||
|
||||
{{% note %}}
|
||||
|
||||
#### What about buckets and measurements?
|
||||
|
||||
If coming from InfluxDB Cloud Serverless or InfluxDB powered by the TSM storage
|
||||
engine, you're likely familiar with the concepts _bucket_ and _measurement_.
|
||||
_Bucket_ in TSM or InfluxDB Cloud Serverless is synonymous with _database_ in
|
||||
{{% product-name %}}. _Measurement_ in TSM or InfluxDB Cloud Serverless is
|
||||
synonymous with _table_ in {{% product-name %}}.
|
||||
{{% /note %}}
|
||||
|
||||
### Schema on write
|
||||
|
||||
When using InfluxDB, you define your schema as you write your data.
|
||||
You don't need to create measurements (equivalent to a relational table) or
|
||||
explicitly define the schema of the measurement.
|
||||
Measurement schemas are defined by the schema of data as it is written to the measurement.
|
||||
As you write data to InfluxDB, the data defines the table schema. You don't need
|
||||
to create tables or explicitly define the table schema.
|
||||
|
||||
### Important definitions
|
||||
|
||||
The following definitions are important to understand when using InfluxDB:
|
||||
|
||||
- **Point**: Single data record identified by its _measurement, tag keys, tag values, field key, and timestamp_.
|
||||
- **Series**: A group of points with the same _measurement, tag keys and values, and field key_.
|
||||
- **Primary key**: Columns used to uniquely identify each row in a table.
|
||||
Rows are uniquely identified by their _timestamp and tag set_.
|
||||
A row's primary key _tag set_ does not include tags with null values.
|
||||
- **Point**: Single data record identified by its _measurement, tag keys, tag
|
||||
values, field key, and timestamp_.
|
||||
- **Series**: A group of points with the same _measurement, tag keys and values,
|
||||
and field key_.
|
||||
- **Primary key**: Columns used to uniquely identify each row in a table. Rows
|
||||
are uniquely identified by their _timestamp and tag set_. A row's primary key
|
||||
_tag set_ does not include tags with null values.
|
||||
|
||||
##### Example InfluxDB query results
|
||||
|
||||
|
@ -88,8 +98,8 @@ The following definitions are important to understand when using InfluxDB:
|
|||
|
||||
## Tools to use
|
||||
|
||||
The following table compares tools that you can use to interact with {{% product-name %}}.
|
||||
This tutorial covers many of the recommended tools.
|
||||
The following table compares tools that you can use to interact with
|
||||
{{% product-name %}}. This tutorial covers many of the recommended tools.
|
||||
|
||||
| Tool | Administration | Write | Query |
|
||||
| :-------------------------------------------------------------------------------------------------- | :----------------------: | :----------------------: | :----------------------: |
|
||||
|
@ -114,39 +124,52 @@ This tutorial covers many of the recommended tools.
|
|||
{{< /caption >}}
|
||||
|
||||
{{% warn %}}
|
||||
Avoid using the `influx` CLI with {{% product-name %}}.
|
||||
While it may coincidentally work, it isn't supported.
|
||||
Avoid using the `influx` CLI with {{% product-name %}}. While it
|
||||
may coincidentally work, it isn't supported.
|
||||
{{% /warn %}}
|
||||
|
||||
### `influxctl` CLI
|
||||
|
||||
The [`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
The
|
||||
[`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
writes, queries, and performs administrative tasks, such as managing databases
|
||||
and authorization tokens in a cluster.
|
||||
|
||||
### `influx3` data CLI
|
||||
|
||||
The [`influx3` data CLI](/influxdb/cloud-dedicated/get-started/query/?t=influx3+CLI#execute-an-sql-query) is a community-maintained tool that lets you write and query data in {{% product-name %}} from a command line.
|
||||
It uses the HTTP API to write data and uses Flight gRPC to query data.
|
||||
The
|
||||
[`influx3` data CLI](/influxdb/cloud-dedicated/get-started/query/?t=influx3+CLI#execute-an-sql-query)
|
||||
is a community-maintained tool that lets you write and query data in
|
||||
{{% product-name %}} from a command line. It uses the HTTP API to write data and
|
||||
uses Flight gRPC to query data.
|
||||
|
||||
### InfluxDB HTTP API
|
||||
|
||||
The [InfluxDB HTTP API](/influxdb/v2/reference/api/) provides a simple way to let you manage {{% product-name %}} and write and query data using HTTP(S) clients.
|
||||
Examples in this tutorial use cURL, but any HTTP(S) client will work.
|
||||
The [InfluxDB HTTP API](/influxdb/v2/reference/api/) provides a simple way to
|
||||
let you manage {{% product-name %}} and write and query data using HTTP(S)
|
||||
clients. Examples in this tutorial use cURL, but any HTTP(S) client will work.
|
||||
|
||||
The `/write` and `/query` v1-compatible endpoints work with the username/password authentication schemes and existing InfluxDB 1.x tools and code.
|
||||
The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB 2.x tools and code.
|
||||
The `/write` and `/query` v1-compatible endpoints work with the
|
||||
username/password authentication schemes and existing InfluxDB 1.x tools and
|
||||
code. The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB
|
||||
2.x tools and code.
|
||||
|
||||
### InfluxDB client libraries
|
||||
|
||||
InfluxDB client libraries are community-maintained, language-specific clients that interact with InfluxDB APIs.
|
||||
InfluxDB client libraries are community-maintained, language-specific clients
|
||||
that interact with InfluxDB APIs.
|
||||
|
||||
[InfluxDB v3 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v3/) are the recommended client libraries for writing and querying data {{% product-name %}}.
|
||||
They use the HTTP API to write data and use Flight gRPC to query data.
|
||||
[InfluxDB v3 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v3/)
|
||||
are the recommended client libraries for writing and querying data
|
||||
{{% product-name %}}. They use the HTTP API to write data and use InfluxDB's
|
||||
Flight gRPC API to query data.
|
||||
|
||||
[InfluxDB v2 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v2/) can use `/api/v2` HTTP endpoints to manage resources such as buckets and API tokens, and write data in {{% product-name %}}.
|
||||
[InfluxDB v2 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v2/)
|
||||
can use `/api/v2` HTTP endpoints to manage resources such as buckets and API
|
||||
tokens, and write data in {{% product-name %}}.
|
||||
|
||||
[InfluxDB v1 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v1/) can write data to {{% product-name %}}.
|
||||
[InfluxDB v1 client libraries](/influxdb/cloud-dedicated/reference/client-libraries/v1/)
|
||||
can write data to {{% product-name %}}.
|
||||
|
||||
## Authorization
|
||||
|
||||
|
@ -158,13 +181,14 @@ There are two types of tokens:
|
|||
- **Database token**: A token that grants read and write access to InfluxDB
|
||||
databases.
|
||||
- **Management token**: A short-lived (1 hour) [Auth0 token](#) used to
|
||||
administer your InfluxDB cluster.
|
||||
These are generated by the `influxctl` CLI and do not require any direct management.
|
||||
Management tokens authorize a user to perform tasks related to:
|
||||
administer your InfluxDB cluster. These are generated by the `influxctl` CLI
|
||||
and do not require any direct management. Management tokens authorize a user
|
||||
to perform tasks related to:
|
||||
|
||||
- Account management
|
||||
- Database management
|
||||
- Database token management
|
||||
- Pricing
|
||||
<!-- - Infrastructure management -->
|
||||
{{< page-nav next="/influxdb/cloud-dedicated/get-started/setup/" >}}
|
||||
|
||||
{{< page-nav next="/influxdb/clustered/get-started/setup/" >}}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
title: Use InfluxDB client libraries to write line protocol data
|
||||
description: >
|
||||
Use InfluxDB API clients to write line protocol data to InfluxDB Cloud Dedicated.
|
||||
Use InfluxDB API clients to write points as line protocol data to InfluxDB
|
||||
Cloud Dedicated.
|
||||
menu:
|
||||
influxdb_cloud_dedicated:
|
||||
name: Use client libraries
|
||||
|
@ -13,23 +14,35 @@ related:
|
|||
- /influxdb/cloud-dedicated/get-started/write/
|
||||
---
|
||||
|
||||
Use InfluxDB client libraries to build line protocol, and then write it to an
|
||||
InfluxDB database.
|
||||
Use InfluxDB client libraries to build time series points, and then write them
|
||||
line protocol to an {{% product-name %}} database.
|
||||
|
||||
- [Construct line protocol](#construct-line-protocol)
|
||||
- [Example home schema](#example-home-schema)
|
||||
- [Set up your project](#set-up-your-project)
|
||||
- [Construct points and write line protocol](#construct-points-and-write-line-protocol)
|
||||
- [Run the example](#run-the-example)
|
||||
- [Home sensor data line protocol](#home-sensor-data-line-protocol)
|
||||
|
||||
## Construct line protocol
|
||||
|
||||
With a [basic understanding of line protocol](/influxdb/cloud-dedicated/write-data/line-protocol/),
|
||||
you can now construct line protocol and write data to InfluxDB.
|
||||
Consider a use case where you collect data from sensors in your home.
|
||||
Each sensor collects temperature, humidity, and carbon monoxide readings.
|
||||
With a
|
||||
[basic understanding of line protocol](/influxdb/cloud-dedicated/write-data/line-protocol/),
|
||||
you can construct line protocol data and write it to InfluxDB.
|
||||
|
||||
All InfluxDB client libraries write data in line protocol format to InfluxDB.
|
||||
Client library `write` methods let you provide data as raw line protocol or as
|
||||
`Point` objects that the client library converts to line protocol. If your
|
||||
program creates the data you write to InfluxDB, use the client library `Point`
|
||||
interface to take advantage of type safety in your program.
|
||||
|
||||
### Example home schema
|
||||
|
||||
Consider a use case where you collect data from sensors in your home. Each
|
||||
sensor collects temperature, humidity, and carbon monoxide readings.
|
||||
|
||||
To collect this data, use the following schema:
|
||||
|
||||
<!-- vale InfluxDataDocs.v3Schema = NO -->
|
||||
|
||||
- **measurement**: `home`
|
||||
- **tags**
|
||||
- `room`: Living Room or Kitchen
|
||||
|
@ -39,337 +52,427 @@ To collect this data, use the following schema:
|
|||
- `co`: carbon monoxide in parts per million (integer)
|
||||
- **timestamp**: Unix timestamp in _second_ precision
|
||||
|
||||
The following example shows how to construct and write points that follow this schema.
|
||||
<!-- vale InfluxDataDocs.v3Schema = YES -->
|
||||
|
||||
The following example shows how to construct and write points that follow the
|
||||
`home` schema.
|
||||
|
||||
## Set up your project
|
||||
|
||||
The examples in this guide assume you followed [Set up InfluxDB](/influxdb/cloud-dedicated/get-started/setup/)
|
||||
and [Write data set up](/influxdb/cloud-dedicated/get-started/write/#set-up-your-project-and-credentials)
|
||||
The examples in this guide assume you followed
|
||||
[Set up InfluxDB](/influxdb/cloud-dedicated/get-started/setup/) and
|
||||
[Write data set up](/influxdb/cloud-dedicated/get-started/write/#set-up-your-project-and-credentials)
|
||||
instructions in [Get started](/influxdb/cloud-dedicated/get-started/).
|
||||
|
||||
After setting up InfluxDB and your project, you should have the following:
|
||||
|
||||
- {{< product-name >}} credentials:
|
||||
|
||||
- [Database](/influxdb/cloud-dedicated/admin/databases/)
|
||||
- [Database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens)
|
||||
- Cluster hostname
|
||||
- [Database](/influxdb/cloud-dedicated/admin/databases/)
|
||||
- [Database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens)
|
||||
- Cluster hostname
|
||||
|
||||
- A directory for your project.
|
||||
|
||||
- Credentials stored as environment variables or in a project configuration file--for example, a `.env` ("dotenv") file.
|
||||
- Credentials stored as environment variables or in a project configuration
|
||||
file--for example, a `.env` ("dotenv") file.
|
||||
|
||||
- Client libraries installed for writing data to InfluxDB.
|
||||
|
||||
The following example shows how to construct `Point` objects that follow the [example `home` schema](#example-home-schema), and then write the points as line protocol to an
|
||||
{{% product-name %}} database.
|
||||
The following example shows how to construct `Point` objects that follow the
|
||||
[example `home` schema](#example-home-schema), and then write the data as line
|
||||
protocol to an {{% product-name %}} database.
|
||||
|
||||
The examples use InfluxDB v3 client libraries. For examples using InfluxDB v2
|
||||
client libraries to write data to InfluxDB v3, see
|
||||
[InfluxDB v2 clients](/influxdb/cloud-dedicated/reference/client-libraries/v2/).
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
|
||||
<!-- prettier-ignore -->
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
The following steps set up a Go project using the
|
||||
[InfluxDB v3 Go client](https://github.com/InfluxCommunity/influxdb3-go/):
|
||||
|
||||
<!-- BEGIN GO PROJECT SETUP -->
|
||||
|
||||
1. Install [Go 1.13 or later](https://golang.org/doc/install).
|
||||
1. Install [Go 1.13 or later](https://golang.org/doc/install).
|
||||
|
||||
2. Inside of your project directory, install the client package to your project dependencies.
|
||||
1. Create a directory for your Go module and change to the directory--for
|
||||
example:
|
||||
|
||||
```sh
|
||||
go get github.com/influxdata/influxdb-client-go/v2
|
||||
```
|
||||
```sh
|
||||
mkdir iot-starter-go && cd $_
|
||||
```
|
||||
|
||||
1. Initialize a Go module--for example:
|
||||
|
||||
```sh
|
||||
go mod init iot-starter
|
||||
```
|
||||
|
||||
1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/),
|
||||
which provides the InfluxDB `influxdb3` Go client library module.
|
||||
|
||||
```sh
|
||||
go get github.com/InfluxCommunity/influxdb3-go
|
||||
```
|
||||
|
||||
<!-- END GO SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN NODE.JS PROJECT SETUP -->
|
||||
|
||||
Inside of your project directory, install the `@influxdata/influxdb-client` InfluxDB v2 JavaScript client library.
|
||||
The following steps set up a JavaScript project using the
|
||||
[InfluxDB v3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/).
|
||||
|
||||
```sh
|
||||
npm install --save @influxdata/influxdb-client
|
||||
```
|
||||
1. Install [Node.js](https://nodejs.org/en/download/).
|
||||
|
||||
1. Create a directory for your JavaScript project and change to the
|
||||
directory--for example:
|
||||
|
||||
```sh
|
||||
mkdir -p iot-starter-js && cd $_
|
||||
```
|
||||
|
||||
1. Initialize a project--for example, using `npm`:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
npm init
|
||||
```
|
||||
|
||||
1. Install the `@influxdata/influxdb3-client` InfluxDB v3 JavaScript client
|
||||
library.
|
||||
|
||||
```sh
|
||||
npm install @influxdata/influxdb3-client
|
||||
```
|
||||
|
||||
<!-- END NODE.JS SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN PYTHON SETUP PROJECT -->
|
||||
|
||||
1. **Optional, but recommended**: Use [`venv`](https://docs.python.org/3/library/venv.html)) or [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual environment for installing and executing code--for example:
|
||||
The following steps set up a Python project using the
|
||||
[InfluxDB v3 Python client](https://github.com/InfluxCommunity/influxdb3-python/):
|
||||
|
||||
Inside of your project directory, enter the following command using `venv` to create and activate a virtual environment for the project:
|
||||
1. Install [Python](https://www.python.org/downloads/)
|
||||
|
||||
```sh
|
||||
python3 -m venv envs/env1 && source ./envs/env1/bin/activate
|
||||
```
|
||||
1. Inside of your project directory, create a directory for your Python module
|
||||
and change to the module directory--for example:
|
||||
|
||||
2. Install the [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), which provides the InfluxDB `influxdb_client_3` Python client library module and also installs the [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for working with Arrow data.
|
||||
```sh
|
||||
mkdir -p iot-starter-py && cd $_
|
||||
```
|
||||
|
||||
```sh
|
||||
pip install influxdb3-python
|
||||
```
|
||||
1. **Optional, but recommended**: Use
|
||||
[`venv`](https://docs.python.org/3/library/venv.html) or
|
||||
[`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual
|
||||
environment for installing and executing code--for example, enter the
|
||||
following command using `venv` to create and activate a virtual environment
|
||||
for the project:
|
||||
|
||||
```bash
|
||||
python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate
|
||||
```
|
||||
|
||||
1. Install
|
||||
[`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python),
|
||||
which provides the InfluxDB `influxdb_client_3` Python client library module
|
||||
and also installs the
|
||||
[`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for
|
||||
working with Arrow data.
|
||||
|
||||
```sh
|
||||
pip install influxdb3-python
|
||||
```
|
||||
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
## Construct points and write line protocol
|
||||
|
||||
Client libraries provide one or more `Point` constructor methods. Some libraries
|
||||
support language-native data structures, such as Go's `struct`, for creating
|
||||
points.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
|
||||
<!-- prettier-ignore -->
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!-- BEGIN GO SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-point.go`.
|
||||
1. Create a file for your module--for example: `main.go`.
|
||||
|
||||
2. In `write-point.go`, enter the following sample code:
|
||||
1. In `main.go`, enter the following sample code:
|
||||
|
||||
```go
|
||||
package main
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
"fmt"
|
||||
"github.com/influxdata/influxdb-client-go/v2"
|
||||
)
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"fmt"
|
||||
"time"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/influxdata/line-protocol/v2/lineprotocol"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Set a log level constant
|
||||
const debugLevel uint = 4
|
||||
func Write() error {
|
||||
url := os.Getenv("INFLUX_HOST")
|
||||
token := os.Getenv("INFLUX_TOKEN")
|
||||
database := os.Getenv("INFLUX_DATABASE")
|
||||
|
||||
/**
|
||||
* Define options for the client.
|
||||
* Instantiate the client with the following arguments:
|
||||
* - An object containing InfluxDB URL and token credentials.
|
||||
* - Write options for batch size and timestamp precision.
|
||||
// To instantiate a client, call New() with InfluxDB credentials.
|
||||
client, err := influxdb3.New(influxdb3.ClientConfig{
|
||||
Host: url,
|
||||
Token: token,
|
||||
Database: database,
|
||||
})
|
||||
|
||||
/** Use a deferred function to ensure the client is closed when the
|
||||
* function returns.
|
||||
**/
|
||||
clientOptions := influxdb2.DefaultOptions().
|
||||
SetBatchSize(20).
|
||||
SetLogLevel(debugLevel).
|
||||
SetPrecision(time.Second)
|
||||
defer func (client *influxdb3.Client) {
|
||||
err = client.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}(client)
|
||||
|
||||
client := influxdb2.NewClientWithOptions(os.Getenv("INFLUX_URL"),
|
||||
os.Getenv("INFLUX_TOKEN"),
|
||||
clientOptions)
|
||||
|
||||
/**
|
||||
* Create an asynchronous, non-blocking write client.
|
||||
* Provide your InfluxDB org and database as arguments
|
||||
/** Use the NewPoint method to construct a point.
|
||||
* NewPoint(measurement, tags map, fields map, time)
|
||||
**/
|
||||
writeAPI := client.WriteAPI(os.Getenv("INFLUX_ORG"), "get-started")
|
||||
point := influxdb3.NewPoint("home",
|
||||
map[string]string{
|
||||
"room": "Living Room",
|
||||
},
|
||||
map[string]any{
|
||||
"temp": 24.5,
|
||||
"hum": 40.5,
|
||||
"co": 15i},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
// Get the errors channel for the asynchronous write client.
|
||||
errorsCh := writeAPI.Errors()
|
||||
|
||||
/** Create a point.
|
||||
* Provide measurement, tags, and fields as arguments.
|
||||
/** Use the NewPointWithMeasurement method to construct a point with
|
||||
* method chaining.
|
||||
**/
|
||||
p := influxdb2.NewPointWithMeasurement("home").
|
||||
AddTag("room", "Kitchen").
|
||||
AddField("temp", 72.0).
|
||||
AddField("hum", 20.2).
|
||||
AddField("co", 9).
|
||||
SetTime(time.Now())
|
||||
point2 := influxdb3.NewPointWithMeasurement("home").
|
||||
SetTag("room", "Living Room").
|
||||
SetField("temp", 23.5).
|
||||
SetField("hum", 38.0).
|
||||
SetField("co", 16i).
|
||||
SetTimestamp(time.Now())
|
||||
|
||||
// Define a proc for handling errors.
|
||||
go func() {
|
||||
for err := range errorsCh {
|
||||
fmt.Printf("write error: %s\n", err.Error())
|
||||
}
|
||||
}()
|
||||
fmt.Println("Writing points")
|
||||
points := []*influxdb3.Point{point, point2}
|
||||
|
||||
// Write the point asynchronously
|
||||
writeAPI.WritePoint(p)
|
||||
/** Write points to InfluxDB.
|
||||
* You can specify WriteOptions, such as Gzip threshold,
|
||||
* default tags, and timestamp precision. Default precision is lineprotocol.Nanosecond
|
||||
**/
|
||||
err = client.WritePoints(context.Background(), points,
|
||||
influxdb3.WithPrecision(lineprotocol.Second))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send pending writes from the buffer to the database.
|
||||
writeAPI.Flush()
|
||||
func main() {
|
||||
Write()
|
||||
}
|
||||
```
|
||||
|
||||
1. To run the module and write the data to your {{% product-name %}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
go run main.go
|
||||
```
|
||||
|
||||
<!-- END GO SAMPLE -->
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
// Ensure background processes finish and release resources.
|
||||
client.Close()
|
||||
}
|
||||
```
|
||||
<!-- END GO SETUP SAMPLE -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN NODE.JS SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-point.js`.
|
||||
1. Create a file for your module--for example: `write-points.js`.
|
||||
|
||||
2. In `write-point.js`, enter the following sample code:
|
||||
1. In `write-points.js`, enter the following sample code:
|
||||
|
||||
```js
|
||||
'use strict'
|
||||
/** @module write
|
||||
    * Use the JavaScript client library for Node.js to create a point and write it to InfluxDB
|
||||
**/
|
||||
```js
|
||||
// write-points.js
|
||||
import { InfluxDBClient, Point } from '@influxdata/influxdb3-client';
|
||||
|
||||
import {InfluxDB, Point} from '@influxdata/influxdb-client'
|
||||
/**
|
||||
* Set InfluxDB credentials.
|
||||
*/
|
||||
const host = process.env.INFLUX_HOST ?? '';
|
||||
const database = process.env.INFLUX_DATABASE;
|
||||
const token = process.env.INFLUX_TOKEN;
|
||||
|
||||
/** Get credentials from the environment **/
|
||||
const url = process.env.INFLUX_URL
|
||||
const token = process.env.INFLUX_TOKEN
|
||||
const org = process.env.INFLUX_ORG
|
||||
/**
|
||||
* Write line protocol to InfluxDB using the JavaScript client library.
|
||||
*/
|
||||
export async function writePoints() {
|
||||
/**
|
||||
* Instantiate an InfluxDBClient.
|
||||
* Provide the host URL and the database token.
|
||||
*/
|
||||
const client = new InfluxDBClient({ host, token });
|
||||
|
||||
/**
|
||||
* Instantiate a client with a configuration object
|
||||
* that contains your InfluxDB URL and token.
|
||||
**/
|
||||
const influxDB = new InfluxDB({url, token})
|
||||
/** Use the fluent interface with chained methods to construct Points. */
|
||||
const point = Point.measurement('home')
|
||||
.setTag('room', 'Living Room')
|
||||
.setFloatField('temp', 22.2)
|
||||
.setFloatField('hum', 35.5)
|
||||
.setIntegerField('co', 7)
|
||||
.setTimestamp(new Date().getTime() / 1000);
|
||||
|
||||
/**
|
||||
* Create a write client configured to write to the database.
|
||||
* Provide your InfluxDB org and database.
|
||||
**/
|
||||
const writeApi = influxDB.getWriteApi(org, 'get-started')
|
||||
const point2 = Point.measurement('home')
|
||||
.setTag('room', 'Kitchen')
|
||||
.setFloatField('temp', 21.0)
|
||||
.setFloatField('hum', 35.9)
|
||||
.setIntegerField('co', 0)
|
||||
.setTimestamp(new Date().getTime() / 1000);
|
||||
|
||||
/**
|
||||
* Create a point and add tags and fields.
|
||||
* To add a field, call the field method for your data type.
|
||||
**/
|
||||
const point1 = new Point('home')
|
||||
.tag('room', 'Kitchen')
|
||||
.floatField('temp', 72.0)
|
||||
.floatField('hum', 20.2)
|
||||
.intField('co', 9)
|
||||
console.log(` ${point1}`)
|
||||
/** Write points to InfluxDB.
|
||||
* The write method accepts an array of points, the target database, and
|
||||
* an optional configuration object.
|
||||
* You can specify WriteOptions, such as Gzip threshold, default tags,
|
||||
* and timestamp precision. Default precision is lineprotocol.Nanosecond
|
||||
**/
|
||||
|
||||
/**
|
||||
* Add the point to the batch.
|
||||
**/
|
||||
writeApi.writePoint(point1)
|
||||
try {
|
||||
await client.write([point, point2], database, '', { precision: 's' });
|
||||
console.log('Data has been written successfully!');
|
||||
} catch (error) {
|
||||
console.error(`Error writing data to InfluxDB: ${error.body}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush pending writes in the batch from the buffer and close the write client.
|
||||
**/
|
||||
writeApi.close().then(() => {
|
||||
console.log('WRITE FINISHED')
|
||||
})
|
||||
```
|
||||
<!-- END NODE.JS SETUP SAMPLE -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN PYTHON SETUP SAMPLE -->
|
||||
client.close();
|
||||
}
|
||||
|
||||
1. Create a file for your module--for example: `write-point.py`.
|
||||
writePoints();
|
||||
```
|
||||
|
||||
2. In `write-point.py`, enter the following sample code to write data in batching mode:
|
||||
1. To run the module and write the data to your {{< product-name >}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
```python
|
||||
import os
|
||||
from influxdb_client_3 import Point, write_client_options, WritePrecision, WriteOptions, InfluxDBError
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
# Create an array of points with tags and fields.
|
||||
points = [Point("home")
|
||||
.tag("room", "Kitchen")
|
||||
.field("temp", 25.3)
|
||||
.field('hum', 20.2)
|
||||
.field('co', 9)]
|
||||
```sh
|
||||
   node write-points.js
|
||||
```
|
||||
|
||||
# With batching mode, define callbacks to execute after a successful or failed write request.
|
||||
# Callback methods receive the configuration and data sent in the request.
|
||||
def success(self, data: str):
|
||||
print(f"Successfully wrote batch: data: {data}")
|
||||
<!-- END NODE.JS SAMPLE -->
|
||||
|
||||
def error(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
def retry(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
|
||||
<!-- BEGIN PYTHON SETUP SAMPLE -->
|
||||
|
||||
# Configure options for batch writing.
|
||||
write_options = WriteOptions(batch_size=500,
|
||||
flush_interval=10_000,
|
||||
jitter_interval=2_000,
|
||||
retry_interval=5_000,
|
||||
max_retries=5,
|
||||
max_retry_delay=30_000,
|
||||
exponential_base=2)
|
||||
1. Create a file for your module--for example: `write-points.py`.
|
||||
|
||||
# Create an options dict that sets callbacks and WriteOptions.
|
||||
wco = write_client_options(success_callback=success,
|
||||
error_callback=error,
|
||||
retry_callback=retry,
|
||||
WriteOptions=write_options)
|
||||
1. In `write-points.py`, enter the following sample code to write data in
|
||||
batching mode:
|
||||
|
||||
# Instantiate a synchronous instance of the client with your
|
||||
# InfluxDB credentials and write options.
|
||||
with InfluxDBClient3(host=config['INFLUX_HOST'],
|
||||
token=config['INFLUX_TOKEN'],
|
||||
database=config['INFLUX_DATABASE'],
|
||||
write_client_options=wco) as client:
|
||||
```python
|
||||
import os
|
||||
from influxdb_client_3 import (
|
||||
InfluxDBClient3, InfluxDBError, Point, WritePrecision,
|
||||
WriteOptions, write_client_options)
|
||||
|
||||
client.write(points, write_precision='s')
|
||||
```
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
host = os.getenv('INFLUX_HOST')
|
||||
token = os.getenv('INFLUX_TOKEN')
|
||||
database = os.getenv('INFLUX_DATABASE')
|
||||
|
||||
# Create an array of points with tags and fields.
|
||||
points = [Point("home")
|
||||
.tag("room", "Kitchen")
|
||||
.field("temp", 25.3)
|
||||
.field('hum', 20.2)
|
||||
.field('co', 9)]
|
||||
|
||||
# With batching mode, define callbacks to execute after a successful or
|
||||
# failed write request.
|
||||
# Callback methods receive the configuration and data sent in the request.
|
||||
def success(self, data: str):
|
||||
print(f"Successfully wrote batch: data: {data}")
|
||||
|
||||
def error(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
|
||||
|
||||
def retry(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
|
||||
|
||||
# Configure options for batch writing.
|
||||
write_options = WriteOptions(batch_size=500,
|
||||
flush_interval=10_000,
|
||||
jitter_interval=2_000,
|
||||
retry_interval=5_000,
|
||||
max_retries=5,
|
||||
max_retry_delay=30_000,
|
||||
exponential_base=2)
|
||||
|
||||
# Create an options dict that sets callbacks and WriteOptions.
|
||||
wco = write_client_options(success_callback=success,
|
||||
error_callback=error,
|
||||
retry_callback=retry,
|
||||
write_options=write_options)
|
||||
|
||||
# Instantiate a synchronous instance of the client with your
|
||||
# InfluxDB credentials and write options, such as Gzip threshold, default tags,
|
||||
# and timestamp precision. Default precision is nanosecond ('ns').
|
||||
with InfluxDBClient3(host=host,
|
||||
token=token,
|
||||
database=database,
|
||||
write_client_options=wco) as client:
|
||||
|
||||
client.write(points, write_precision='s')
|
||||
```
|
||||
|
||||
1. To run the module and write the data to your {{< product-name >}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
python write-points.py
|
||||
```
|
||||
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}} {{< /tabs-wrapper >}}
|
||||
|
||||
The sample code does the following:
|
||||
|
||||
1. Instantiates a client configured with the InfluxDB URL and API token.
|
||||
<!-- vale InfluxDataDocs.v3Schema = NO -->
|
||||
|
||||
2. Uses the client to instantiate a **write client** with credentials.
|
||||
1. Instantiates a client configured with the InfluxDB URL and API token.
|
||||
1. Constructs `home`
|
||||
[measurement](/influxdb/cloud-dedicated/reference/glossary/#measurement)
|
||||
`Point` objects.
|
||||
1. Sends data as line protocol format to InfluxDB and waits for the response.
|
||||
1. If the write succeeds, logs the success message to stdout; otherwise, logs
|
||||
the failure message and error details.
|
||||
1. Closes the client to release resources.
|
||||
|
||||
3. Constructs a `Point` object with the [measurement](/influxdb/cloud-dedicated/reference/glossary/#measurement) name (`"home"`).
|
||||
|
||||
4. Adds a tag and fields to the point.
|
||||
|
||||
5. Adds the point to a batch to be written to the database.
|
||||
|
||||
6. Sends the batch to InfluxDB and waits for the response.
|
||||
|
||||
7. Executes callbacks for the response, flushes the write buffer, and releases resources.
|
||||
|
||||
## Run the example
|
||||
|
||||
To run the sample and write the data to your InfluxDB Cloud Dedicated database, enter the following command in your terminal:
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN GO RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
go run write-point.go
|
||||
```
|
||||
|
||||
<!-- END GO RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN NODE.JS RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
node write-point.js
|
||||
```
|
||||
<!-- END NODE.JS RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN PYTHON RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
python write-point.py
|
||||
```
|
||||
<!-- END PYTHON RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
The example logs the point as line protocol to stdout, and then writes the point to the database.
|
||||
The line protocol is similar to the following:
|
||||
|
||||
### Home sensor data line protocol
|
||||
|
||||
```sh
|
||||
home,room=Kitchen co=9i,hum=20.2,temp=72 1641024000
|
||||
```
|
||||
<!-- vale InfluxDataDocs.v3Schema = YES -->
|
||||
|
|
|
@ -10,17 +10,16 @@ weight: 3
|
|||
influxdb/clustered/tags: [get-started]
|
||||
---
|
||||
|
||||
{{% product-name %}} is a highly available InfluxDB cluster hosted and
|
||||
managed on your own infrastructure and is the platform purpose-built to collect,
|
||||
store, and query time series data.
|
||||
It is powered by the InfluxDB 3.0 storage engine which provides a number of
|
||||
benefits including nearly unlimited series cardinality, improved query performance,
|
||||
and interoperability with widely used data processing tools and platforms.
|
||||
InfluxDB is the platform purpose-built to collect, store, and query
|
||||
time series data.
|
||||
{{% product-name %}} is powered by the InfluxDB 3.0 storage engine, which
|
||||
provides nearly unlimited series cardinality,
|
||||
improved query performance, and interoperability with widely used data
|
||||
processing tools and platforms.
|
||||
|
||||
**Time series data** is a sequence of data points indexed in time order.
|
||||
Data points typically consist of successive measurements made from the same
|
||||
source and are used to track changes over time.
|
||||
Examples of time series data include:
|
||||
**Time series data** is a sequence of data points indexed in time order. Data
|
||||
points typically consist of successive measurements made from the same source
|
||||
and are used to track changes over time. Examples of time series data include:
|
||||
|
||||
- Industrial sensor data
|
||||
- Server performance metrics
|
||||
|
@ -45,33 +44,43 @@ throughout this documentation.
|
|||
### Data organization
|
||||
|
||||
The {{% product-name %}} data model organizes time series data into databases
|
||||
and measurements.
|
||||
and tables.
|
||||
|
||||
A database can contain multiple measurements.
|
||||
Measurements contain multiple tags and fields.
|
||||
A database can contain multiple tables.
|
||||
Tables contain multiple tags and fields.
|
||||
|
||||
- **Database**: Named location where time series data is stored.
|
||||
A database can contain multiple _measurements_.
|
||||
- **Measurement**: Logical grouping for time series data.
|
||||
All _points_ in a given measurement should have the same _tags_.
|
||||
A measurement contains multiple _tags_ and _fields_.
|
||||
- **Tags**: Key-value pairs that provide metadata for each point--for example,
|
||||
something to identify the source or context of the data like host,
|
||||
location, station, etc.
|
||||
Tag values may be null.
|
||||
- **Fields**: Key-value pairs with values that change over time--for example,
|
||||
temperature, pressure, stock price, etc.
|
||||
Field values may be null, but at least one field value is not null on any given row.
|
||||
- **Timestamp**: Timestamp associated with the data.
|
||||
When stored on disk and queried, all data is ordered by time.
|
||||
A timestamp is never null.
|
||||
- **Database**: A named location where time series data is stored in _tables_.
|
||||
_Database_ is synonymous with _bucket_ in InfluxDB Cloud Serverless and InfluxDB TSM.
|
||||
- **Table**: A logical grouping for time series data. All _points_ in a given
|
||||
table should have the same _tags_. A table contains _tags_ and
|
||||
_fields_. _Table_ is synonymous with _measurement_ in InfluxDB Cloud
|
||||
Serverless and InfluxDB TSM.
|
||||
- **Tags**: Key-value pairs that provide metadata for each point--for
|
||||
example, something to identify the source or context of the data like
|
||||
host, location, station, etc. Tag values may be null.
|
||||
- **Fields**: Key-value pairs with values that change over time--for
|
||||
example, temperature, pressure, stock price, etc. Field values may be
|
||||
null, but at least one field value is not null on any given row.
|
||||
- **Timestamp**: Timestamp associated with the data. When stored on disk and
|
||||
queried, all data is ordered by time. A timestamp is never null.
|
||||
|
||||
{{% note %}}
|
||||
|
||||
#### What about buckets and measurements?
|
||||
|
||||
If coming from InfluxDB Cloud Serverless or InfluxDB powered by the TSM storage engine, you're likely familiar
|
||||
with the concepts _bucket_ and _measurement_.
|
||||
_Bucket_ in TSM or InfluxDB Cloud Serverless is synonymous with
|
||||
_database_ in {{% product-name %}}.
|
||||
_Measurement_ in TSM or InfluxDB Cloud Serverless is synonymous with
|
||||
_table_ in {{% product-name %}}.
|
||||
{{% /note %}}
|
||||
|
||||
### Schema on write
|
||||
|
||||
When using InfluxDB, you define your schema as you write your data.
|
||||
You don't need to create measurements (equivalent to a relational table) or
|
||||
explicitly define the schema of the measurement.
|
||||
Measurement schemas are defined by the schema of data as it is written to the measurement.
|
||||
As you write data to InfluxDB, the data defines the table schema.
|
||||
You don't need to create tables or
|
||||
explicitly define the table schema.
|
||||
|
||||
### Important definitions
|
||||
|
||||
|
@ -121,7 +130,7 @@ While it may coincidentally work, it isn't supported.
|
|||
|
||||
### `influxctl` admin CLI
|
||||
|
||||
The [`influxctl` command line interface (CLI)](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
The [`influxctl` command line interface (CLI)](/influxdb/clustered/reference/cli/influxctl/)
|
||||
writes, queries, and performs administrative tasks, such as managing databases
|
||||
and authorization tokens in a cluster.
|
||||
|
||||
|
@ -143,7 +152,7 @@ The `/api/v2/write` v2-compatible endpoint works with existing InfluxDB 2.x tool
|
|||
InfluxDB client libraries are community-maintained, language-specific clients that interact with InfluxDB APIs.
|
||||
|
||||
[InfluxDB v3 client libraries](/influxdb/clustered/reference/client-libraries/v3/) are the recommended client libraries for writing and querying data {{% product-name %}}.
|
||||
They use the HTTP API to write data and use Flight gRPC to query data.
|
||||
They use the HTTP API to write data and use InfluxDB's Flight gRPC API to query data.
|
||||
|
||||
[InfluxDB v2 client libraries](/influxdb/clustered/reference/client-libraries/v2/) can use `/api/v2` HTTP endpoints to manage resources such as buckets and API tokens, and write data in {{% product-name %}}.
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,375 +0,0 @@
|
|||
---
|
||||
title: Use InfluxDB client libraries to write line protocol data
|
||||
description: >
|
||||
Use InfluxDB API clients to write line protocol data to InfluxDB Clustered.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Use client libraries
|
||||
parent: Write line protocol
|
||||
identifier: write-client-libs
|
||||
weight: 103
|
||||
related:
|
||||
- /influxdb/clustered/reference/syntax/line-protocol/
|
||||
- /influxdb/clustered/get-started/write/
|
||||
---
|
||||
|
||||
Use InfluxDB client libraries to build line protocol, and then write it to an
|
||||
InfluxDB database.
|
||||
|
||||
- [Construct line protocol](#construct-line-protocol)
|
||||
- [Set up your project](#set-up-your-project)
|
||||
- [Construct points and write line protocol](#construct-points-and-write-line-protocol)
|
||||
- [Run the example](#run-the-example)
|
||||
- [Home sensor data line protocol](#home-sensor-data-line-protocol)
|
||||
|
||||
## Construct line protocol
|
||||
|
||||
With a [basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
|
||||
you can now construct line protocol and write data to InfluxDB.
|
||||
Consider a use case where you collect data from sensors in your home.
|
||||
Each sensor collects temperature, humidity, and carbon monoxide readings.
|
||||
To collect this data, use the following schema:
|
||||
|
||||
- **measurement**: `home`
|
||||
- **tags**
|
||||
- `room`: Living Room or Kitchen
|
||||
- **fields**
|
||||
- `temp`: temperature in °C (float)
|
||||
- `hum`: percent humidity (float)
|
||||
- `co`: carbon monoxide in parts per million (integer)
|
||||
- **timestamp**: Unix timestamp in _second_ precision
|
||||
|
||||
The following example shows how to construct and write points that follow this schema.
|
||||
|
||||
## Set up your project
|
||||
|
||||
The examples in this guide assume you followed [Set up InfluxDB](/influxdb/clustered/get-started/setup/)
|
||||
and [Write data set up](/influxdb/clustered/get-started/write/#set-up-your-project-and-credentials)
|
||||
instructions in [Get started](/influxdb/clustered/get-started/).
|
||||
|
||||
After setting up InfluxDB and your project, you should have the following:
|
||||
|
||||
- {{< product-name >}} credentials:
|
||||
|
||||
- [Database](/influxdb/clustered/admin/databases/)
|
||||
- [Database token](/influxdb/clustered/admin/tokens/#database-tokens)
|
||||
- Cluster hostname
|
||||
|
||||
- A directory for your project.
|
||||
|
||||
- Credentials stored as environment variables or in a project configuration file--for example, a `.env` ("dotenv") file.
|
||||
|
||||
- Client libraries installed for writing data to InfluxDB.
|
||||
|
||||
The following example shows how to construct `Point` objects that follow the [example `home` schema](#example-home-schema), and then write the points as line protocol to an
|
||||
{{% product-name %}} database.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN GO PROJECT SETUP -->
|
||||
|
||||
1. Install [Go 1.13 or later](https://golang.org/doc/install).
|
||||
|
||||
2. Inside of your project directory, install the client package to your project dependencies.
|
||||
|
||||
```sh
|
||||
go get github.com/influxdata/influxdb-client-go/v2
|
||||
```
|
||||
|
||||
<!-- END GO SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN NODE.JS PROJECT SETUP -->
|
||||
|
||||
Inside of your project directory, install the `@influxdata/influxdb-client` InfluxDB v2 JavaScript client library.
|
||||
|
||||
```sh
|
||||
npm install --save @influxdata/influxdb-client
|
||||
```
|
||||
|
||||
<!-- END NODE.JS SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN PYTHON SETUP PROJECT -->
|
||||
|
||||
1. **Optional, but recommended**: Use [`venv`](https://docs.python.org/3/library/venv.html) or [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual environment for installing and executing code--for example:
|
||||
|
||||
Inside of your project directory, enter the following command using `venv` to create and activate a virtual environment for the project:
|
||||
|
||||
```sh
|
||||
python3 -m venv envs/env1 && source ./envs/env1/bin/activate
|
||||
```
|
||||
|
||||
2. Install the [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), which provides the InfluxDB `influxdb_client_3` Python client library module and also installs the [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for working with Arrow data.
|
||||
|
||||
```sh
|
||||
pip install influxdb3-python
|
||||
```
|
||||
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
## Construct points and write line protocol
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN GO SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-point.go`.
|
||||
|
||||
2. In `write-point.go`, enter the following sample code:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
"fmt"
|
||||
"github.com/influxdata/influxdb-client-go/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Set a log level constant
|
||||
const debugLevel uint = 4
|
||||
|
||||
/**
|
||||
* Define options for the client.
|
||||
* Instantiate the client with the following arguments:
|
||||
* - An object containing InfluxDB URL and token credentials.
|
||||
* - Write options for batch size and timestamp precision.
|
||||
**/
|
||||
clientOptions := influxdb2.DefaultOptions().
|
||||
SetBatchSize(20).
|
||||
SetLogLevel(debugLevel).
|
||||
SetPrecision(time.Second)
|
||||
|
||||
client := influxdb2.NewClientWithOptions(os.Getenv("INFLUX_URL"),
|
||||
os.Getenv("INFLUX_TOKEN"),
|
||||
clientOptions)
|
||||
|
||||
/**
|
||||
* Create an asynchronous, non-blocking write client.
|
||||
* Provide your InfluxDB org and database as arguments
|
||||
**/
|
||||
writeAPI := client.WriteAPI(os.Getenv("INFLUX_ORG"), "get-started")
|
||||
|
||||
// Get the errors channel for the asynchronous write client.
|
||||
errorsCh := writeAPI.Errors()
|
||||
|
||||
/** Create a point.
|
||||
* Provide measurement, tags, and fields as arguments.
|
||||
**/
|
||||
p := influxdb2.NewPointWithMeasurement("home").
|
||||
AddTag("room", "Kitchen").
|
||||
AddField("temp", 72.0).
|
||||
AddField("hum", 20.2).
|
||||
AddField("co", 9).
|
||||
SetTime(time.Now())
|
||||
|
||||
// Define a proc for handling errors.
|
||||
go func() {
|
||||
for err := range errorsCh {
|
||||
fmt.Printf("write error: %s\n", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
// Write the point asynchronously
|
||||
writeAPI.WritePoint(p)
|
||||
|
||||
// Send pending writes from the buffer to the database.
|
||||
writeAPI.Flush()
|
||||
|
||||
// Ensure background processes finish and release resources.
|
||||
client.Close()
|
||||
}
|
||||
```
|
||||
<!-- END GO SETUP SAMPLE -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN NODE.JS SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-point.js`.
|
||||
|
||||
2. In `write-point.js`, enter the following sample code:
|
||||
|
||||
```js
|
||||
'use strict'
|
||||
/** @module write
|
||||
    * Use the JavaScript client library for Node.js to create a point and write it to InfluxDB
|
||||
**/
|
||||
|
||||
import {InfluxDB, Point} from '@influxdata/influxdb-client'
|
||||
|
||||
/** Get credentials from the environment **/
|
||||
const url = process.env.INFLUX_URL
|
||||
const token = process.env.INFLUX_TOKEN
|
||||
const org = process.env.INFLUX_ORG
|
||||
|
||||
/**
|
||||
* Instantiate a client with a configuration object
|
||||
* that contains your InfluxDB URL and token.
|
||||
**/
|
||||
const influxDB = new InfluxDB({url, token})
|
||||
|
||||
/**
|
||||
* Create a write client configured to write to the database.
|
||||
* Provide your InfluxDB org and database.
|
||||
**/
|
||||
const writeApi = influxDB.getWriteApi(org, 'get-started')
|
||||
|
||||
/**
|
||||
* Create a point and add tags and fields.
|
||||
* To add a field, call the field method for your data type.
|
||||
**/
|
||||
const point1 = new Point('home')
|
||||
.tag('room', 'Kitchen')
|
||||
.floatField('temp', 72.0)
|
||||
.floatField('hum', 20.2)
|
||||
.intField('co', 9)
|
||||
console.log(` ${point1}`)
|
||||
|
||||
/**
|
||||
* Add the point to the batch.
|
||||
**/
|
||||
writeApi.writePoint(point1)
|
||||
|
||||
/**
|
||||
* Flush pending writes in the batch from the buffer and close the write client.
|
||||
**/
|
||||
writeApi.close().then(() => {
|
||||
console.log('WRITE FINISHED')
|
||||
})
|
||||
```
|
||||
<!-- END NODE.JS SETUP SAMPLE -->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!-- BEGIN PYTHON SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-point.py`.
|
||||
|
||||
2. In `write-point.py`, enter the following sample code to write data in batching mode:
|
||||
|
||||
```python
|
||||
import os
|
||||
from influxdb_client_3 import Point, write_client_options, WritePrecision, WriteOptions, InfluxDBError
|
||||
|
||||
# Create an array of points with tags and fields.
|
||||
points = [Point("home")
|
||||
.tag("room", "Kitchen")
|
||||
.field("temp", 25.3)
|
||||
.field('hum', 20.2)
|
||||
.field('co', 9)]
|
||||
|
||||
# With batching mode, define callbacks to execute after a successful or failed write request.
|
||||
# Callback methods receive the configuration and data sent in the request.
|
||||
def success(self, data: str):
|
||||
print(f"Successfully wrote batch: data: {data}")
|
||||
|
||||
def error(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
|
||||
|
||||
def retry(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
|
||||
|
||||
# Configure options for batch writing.
|
||||
write_options = WriteOptions(batch_size=500,
|
||||
flush_interval=10_000,
|
||||
jitter_interval=2_000,
|
||||
retry_interval=5_000,
|
||||
max_retries=5,
|
||||
max_retry_delay=30_000,
|
||||
exponential_base=2)
|
||||
|
||||
# Create an options dict that sets callbacks and WriteOptions.
|
||||
wco = write_client_options(success_callback=success,
|
||||
error_callback=error,
|
||||
retry_callback=retry,
|
||||
write_options=write_options)
|
||||
|
||||
# Instantiate a synchronous instance of the client with your
|
||||
# InfluxDB credentials and write options.
|
||||
with InfluxDBClient3(host=config['INFLUX_HOST'],
|
||||
token=config['INFLUX_TOKEN'],
|
||||
database=config['INFLUX_DATABASE'],
|
||||
write_client_options=wco) as client:
|
||||
|
||||
client.write(points, write_precision='s')
|
||||
```
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
The sample code does the following:
|
||||
|
||||
1. Instantiates a client configured with the InfluxDB URL and API token.
|
||||
|
||||
2. Uses the client to instantiate a **write client** with credentials.
|
||||
|
||||
3. Constructs a `Point` object with the [measurement](/influxdb/clustered/reference/glossary/#measurement) name (`"home"`).
|
||||
|
||||
4. Adds a tag and fields to the point.
|
||||
|
||||
5. Adds the point to a batch to be written to the database.
|
||||
|
||||
6. Sends the batch to InfluxDB and waits for the response.
|
||||
|
||||
7. Executes callbacks for the response, flushes the write buffer, and releases resources.
|
||||
|
||||
## Run the example
|
||||
|
||||
To run the sample and write the data to your InfluxDB Clustered database, enter the following command in your terminal:
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[Go](#)
|
||||
[Node.js](#)
|
||||
[Python](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN GO RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
go run write-point.go
|
||||
```
|
||||
|
||||
<!-- END GO RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN NODE.JS RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
node write-point.js
|
||||
```
|
||||
<!-- END NODE.JS RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
|
||||
{{% code-tab-content %}}
|
||||
<!-- BEGIN PYTHON RUN EXAMPLE -->
|
||||
|
||||
```sh
|
||||
python write-point.py
|
||||
```
|
||||
<!-- END PYTHON RUN EXAMPLE -->
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
The example logs the point as line protocol to stdout, and then writes the point to the database.
|
||||
The line protocol is similar to the following:
|
||||
|
||||
### Home sensor data line protocol
|
||||
|
||||
```sh
|
||||
home,room=Kitchen co=9i,hum=20.2,temp=72 1641024000
|
||||
```
|
|
@ -1,165 +0,0 @@
|
|||
---
|
||||
title: Use the influxctl CLI to write line protocol data
|
||||
description: >
|
||||
Use the [`influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/)
|
||||
to write line protocol data to InfluxDB Clustered.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Use the influxctl CLI
|
||||
parent: Write line protocol
|
||||
identifier: write-influxctl
|
||||
weight: 101
|
||||
related:
|
||||
- /influxdb/clustered/reference/cli/influxctl/write/
|
||||
- /influxdb/clustered/reference/syntax/line-protocol/
|
||||
- /influxdb/clustered/get-started/write/
|
||||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/clustered/reference/cli/influxctl/)
|
||||
to write line protocol data to {{< product-name >}}.
|
||||
|
||||
- [Construct line protocol](#construct-line-protocol)
|
||||
- [Write the line protocol to InfluxDB](#write-the-line-protocol-to-influxdb)
|
||||
|
||||
## Construct line protocol
|
||||
|
||||
With a [basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
|
||||
you can now construct line protocol and write data to InfluxDB.
|
||||
Consider a use case where you collect data from sensors in your home.
|
||||
Each sensor collects temperature, humidity, and carbon monoxide readings.
|
||||
To collect this data, use the following schema:
|
||||
|
||||
- **measurement**: `home`
|
||||
- **tags**
|
||||
- `room`: Living Room or Kitchen
|
||||
- **fields**
|
||||
- `temp`: temperature in °C (float)
|
||||
- `hum`: percent humidity (float)
|
||||
- `co`: carbon monoxide in parts per million (integer)
|
||||
- **timestamp**: Unix timestamp in _second_ precision
|
||||
|
||||
The following line protocol represents the schema described above:
|
||||
|
||||
```
|
||||
home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000
|
||||
home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000
|
||||
home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600
|
||||
home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600
|
||||
home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200
|
||||
home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200
|
||||
home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800
|
||||
home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800
|
||||
home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400
|
||||
home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400
|
||||
home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000
|
||||
home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000
|
||||
```
|
||||
|
||||
For this tutorial, you can either pass this line protocol directly to the
|
||||
`influxctl write` command as a string, via `stdin`, or you can save it to and read
|
||||
it from a file.
|
||||
|
||||
## Write the line protocol to InfluxDB
|
||||
|
||||
Use the [`influxctl write` command](/influxdb/clustered/reference/cli/influxctl/write/)
|
||||
to write the [home sensor sample data](#home-sensor-data-line-protocol) to your
|
||||
{{< product-name omit=" Clustered" >}} cluster.
|
||||
Provide the following:
|
||||
|
||||
- The [database](/influxdb/clustered/admin/databases/) name using the `--database` flag
|
||||
- A [database token](/influxdb/clustered/admin/tokens/#database-tokens) (with write permissions
|
||||
on the target database) using the `--token` flag
|
||||
- The timestamp precision as seconds (`s`) using the `--precision` flag
|
||||
- [Line protocol](#construct-line-protocol).
|
||||
Pass the line protocol in one of the following ways:
|
||||
|
||||
- a string on the command line
|
||||
- a path to a file that contains the line protocol
|
||||
- a single dash (`-`) to read the line protocol from stdin
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[string](#)
|
||||
[file](#)
|
||||
[stdin](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% influxdb/custom-timestamps %}}
|
||||
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
|
||||
```sh
|
||||
influxctl write \
|
||||
--database DATABASE_NAME \
|
||||
--token DATABASE_TOKEN \
|
||||
--precision s \
|
||||
'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000
|
||||
home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000
|
||||
home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600
|
||||
home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600
|
||||
home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200
|
||||
home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200
|
||||
home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800
|
||||
home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800
|
||||
home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400
|
||||
home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400
|
||||
home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000
|
||||
home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000'
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
{{% /influxdb/custom-timestamps %}}
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the database to write to.
|
||||
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
|
||||
Database token with write permissions on the target database.
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
|
||||
```sh
|
||||
influxctl write \
|
||||
--database DATABASE_NAME \
|
||||
--token DATABASE_TOKEN \
|
||||
--precision s \
|
||||
LINE_PROTOCOL_FILEPATH
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the database to write to.
|
||||
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
|
||||
Database token with write permissions on the target database.
|
||||
- {{% code-placeholder-key %}}`LINE_PROTOCOL_FILEPATH`{{% /code-placeholder-key %}}:
|
||||
File path to the file containing the line protocol. Can be an absolute file path
|
||||
or relative to the current working directory.
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% code-placeholders "DATABASE_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}}
|
||||
```sh
|
||||
cat LINE_PROTOCOL_FILEPATH | influxctl write \
|
||||
--database DATABASE_NAME \
|
||||
--token DATABASE_TOKEN \
|
||||
--precision s \
|
||||
-
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the database to write to.
|
||||
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
|
||||
Database token with write permissions on the target database.
|
||||
- {{% code-placeholder-key %}}`LINE_PROTOCOL_FILEPATH`{{% /code-placeholder-key %}}:
|
||||
File path to the file containing the line protocol. Can be an absolute file path
|
||||
or relative to the current working directory.
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
|
@ -43,7 +43,7 @@ Each line of line protocol contains the following elements:
|
|||
|
||||
{{< req type="key" >}}
|
||||
|
||||
- {{< req "\*" >}} **measurement**: String that identifies the [measurement](/influxdb/clustered/reference/glossary/#measurement) to store the data in.
|
||||
- {{< req "\*" >}} **measurement**: A string that identifies the [table](/influxdb/clustered/reference/glossary/#table) to store the data in.
|
||||
- **tag set**: Comma-delimited list of key value pairs, each representing a tag.
|
||||
Tag keys and values are unquoted strings. _Spaces, commas, and equal characters must be escaped._
|
||||
- {{< req "\*" >}} **field set**: Comma-delimited list of key value pairs, each representing a field.
|
|
@ -0,0 +1,463 @@
|
|||
---
|
||||
title: Use InfluxDB client libraries to write line protocol data
|
||||
description: >
|
||||
Use InfluxDB API clients to write points as line protocol data to InfluxDB
|
||||
Clustered.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Use client libraries
|
||||
parent: Write line protocol
|
||||
identifier: write-client-libs
|
||||
weight: 103
|
||||
related:
|
||||
- /influxdb/clustered/reference/syntax/line-protocol/
|
||||
- /influxdb/clustered/get-started/write/
|
||||
---
|
||||
|
||||
Use InfluxDB client libraries to build time series points, and then write them
|
||||
as line protocol to an {{% product-name %}} database.
|
||||
|
||||
- [Construct line protocol](#construct-line-protocol)
|
||||
- [Example home schema](#example-home-schema)
|
||||
- [Set up your project](#set-up-your-project)
|
||||
- [Construct points and write line protocol](#construct-points-and-write-line-protocol)
|
||||
|
||||
## Construct line protocol
|
||||
|
||||
With a
|
||||
[basic understanding of line protocol](/influxdb/clustered/write-data/line-protocol/),
|
||||
you can construct line protocol data and write it to InfluxDB.
|
||||
|
||||
All InfluxDB client libraries write data in line protocol format to InfluxDB.
|
||||
Client library `write` methods let you provide data as raw line protocol or as
|
||||
`Point` objects that the client library converts to line protocol. If your
|
||||
program creates the data you write to InfluxDB, use the client library `Point`
|
||||
interface to take advantage of type safety in your program.
|
||||
|
||||
### Example home schema
|
||||
|
||||
Consider a use case where you collect data from sensors in your home. Each
|
||||
sensor collects temperature, humidity, and carbon monoxide readings.
|
||||
|
||||
To collect this data, use the following schema:
|
||||
|
||||
<!-- vale InfluxDataDocs.v3Schema = NO -->
|
||||
|
||||
- **measurement**: `home`
|
||||
- **tags**
|
||||
- `room`: Living Room or Kitchen
|
||||
- **fields**
|
||||
- `temp`: temperature in °C (float)
|
||||
- `hum`: percent humidity (float)
|
||||
- `co`: carbon monoxide in parts per million (integer)
|
||||
- **timestamp**: Unix timestamp in _second_ precision
|
||||
|
||||
<!-- vale InfluxDataDocs.v3Schema = YES -->
|
||||
|
||||
The following example shows how to construct and write points that follow the
|
||||
`home` schema.
|
||||
|
||||
## Set up your project
|
||||
|
||||
The examples in this guide assume you followed
|
||||
[Set up InfluxDB](/influxdb/clustered/get-started/setup/) and
|
||||
[Write data set up](/influxdb/clustered/get-started/write/#set-up-your-project-and-credentials)
|
||||
instructions in [Get started](/influxdb/clustered/get-started/).
|
||||
|
||||
After setting up InfluxDB and your project, you should have the following:
|
||||
|
||||
- {{< product-name >}} credentials:
|
||||
|
||||
- [Database](/influxdb/clustered/admin/databases/)
|
||||
- [Database token](/influxdb/clustered/admin/tokens/#database-tokens)
|
||||
- Cluster hostname
|
||||
|
||||
- A directory for your project.
|
||||
|
||||
- Credentials stored as environment variables or in a project configuration
|
||||
file--for example, a `.env` ("dotenv") file.
|
||||
|
||||
- Client libraries installed for writing data to InfluxDB.
|
||||
|
||||
The following example shows how to construct `Point` objects that follow the
|
||||
[example `home` schema](#example-home-schema), and then write the data as line
|
||||
protocol to an {{% product-name %}} database.
|
||||
|
||||
The examples use InfluxDB v3 client libraries. For examples using InfluxDB v2
|
||||
client libraries to write data to InfluxDB v3, see
|
||||
[InfluxDB v2 clients](/influxdb/clustered/reference/client-libraries/v2/).
|
||||
|
||||
{{< tabs-wrapper >}} {{% tabs %}} [Go](#) [Node.js](#) [Python](#) {{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
The following steps set up a Go project using the
|
||||
[InfluxDB v3 Go client](https://github.com/InfluxCommunity/influxdb3-go/):
|
||||
|
||||
<!-- BEGIN GO PROJECT SETUP -->
|
||||
|
||||
1. Install [Go 1.13 or later](https://golang.org/doc/install).
|
||||
|
||||
1. Create a directory for your Go module and change to the directory--for
|
||||
example:
|
||||
|
||||
```sh
|
||||
mkdir iot-starter-go && cd $_
|
||||
```
|
||||
|
||||
1. Initialize a Go module--for example:
|
||||
|
||||
```sh
|
||||
go mod init iot-starter
|
||||
```
|
||||
|
||||
1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/),
|
||||
which provides the InfluxDB `influxdb3` Go client library module.
|
||||
|
||||
```sh
|
||||
go get github.com/InfluxCommunity/influxdb3-go
|
||||
```
|
||||
|
||||
<!-- END GO SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN NODE.JS PROJECT SETUP -->
|
||||
|
||||
The following steps set up a JavaScript project using the
|
||||
[InfluxDB v3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/).
|
||||
|
||||
1. Install [Node.js](https://nodejs.org/en/download/).
|
||||
|
||||
1. Create a directory for your JavaScript project and change to the
|
||||
directory--for example:
|
||||
|
||||
```sh
|
||||
mkdir -p iot-starter-js && cd $_
|
||||
```
|
||||
|
||||
1. Initialize a project--for example, using `npm`:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
npm init
|
||||
```
|
||||
|
||||
1. Install the `@influxdata/influxdb3-client` InfluxDB v3 JavaScript client
|
||||
library.
|
||||
|
||||
```sh
|
||||
npm install @influxdata/influxdb3-client
|
||||
```
|
||||
|
||||
<!-- END NODE.JS SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN PYTHON SETUP PROJECT -->
|
||||
|
||||
The following steps set up a Python project using the
|
||||
[InfluxDB v3 Python client](https://github.com/InfluxCommunity/influxdb3-python/):
|
||||
|
||||
1. Install [Python](https://www.python.org/downloads/)
|
||||
|
||||
1. Inside of your project directory, create a directory for your Python module
|
||||
and change to the module directory--for example:
|
||||
|
||||
```sh
|
||||
mkdir -p iot-starter-py && cd $_
|
||||
```
|
||||
|
||||
1. **Optional, but recommended**: Use
|
||||
[`venv`](https://docs.python.org/3/library/venv.html) or
|
||||
[`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual
|
||||
environment for installing and executing code--for example, enter the
|
||||
following command using `venv` to create and activate a virtual environment
|
||||
for the project:
|
||||
|
||||
```bash
|
||||
python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate
|
||||
```
|
||||
|
||||
1. Install
|
||||
[`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python),
|
||||
which provides the InfluxDB `influxdb_client_3` Python client library module
|
||||
and also installs the
|
||||
[`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for
|
||||
working with Arrow data.
|
||||
|
||||
```sh
|
||||
pip install influxdb3-python
|
||||
```
|
||||
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}} {{< /tabs-wrapper >}}
|
||||
|
||||
## Construct points and write line protocol
|
||||
|
||||
Client libraries provide one or more `Point` constructor methods. Some libraries
|
||||
support language-native data structures, such as Go's `struct`, for creating
|
||||
points.
|
||||
|
||||
{{< tabs-wrapper >}} {{% tabs %}} [Go](#) [Node.js](#) [Python](#) {{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!-- BEGIN GO SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `main.go`.
|
||||
|
||||
1. In `main.go`, enter the following sample code:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"fmt"
|
||||
"time"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/influxdata/line-protocol/v2/lineprotocol"
|
||||
)
|
||||
|
||||
func Write() error {
|
||||
url := os.Getenv("INFLUX_HOST")
|
||||
token := os.Getenv("INFLUX_TOKEN")
|
||||
database := os.Getenv("INFLUX_DATABASE")
|
||||
|
||||
// To instantiate a client, call New() with InfluxDB credentials.
|
||||
client, err := influxdb3.New(influxdb3.ClientConfig{
|
||||
Host: url,
|
||||
Token: token,
|
||||
Database: database,
|
||||
})
|
||||
|
||||
/** Use a deferred function to ensure the client is closed when the
|
||||
* function returns.
|
||||
**/
|
||||
defer func (client *influxdb3.Client) {
|
||||
err = client.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}(client)
|
||||
|
||||
/** Use the NewPoint method to construct a point.
|
||||
* NewPoint(measurement, tags map, fields map, time)
|
||||
**/
|
||||
point := influxdb3.NewPoint("home",
|
||||
map[string]string{
|
||||
"room": "Living Room",
|
||||
},
|
||||
map[string]any{
|
||||
"temp": 24.5,
|
||||
"hum": 40.5,
|
||||
"co": 15i},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
/** Use the NewPointWithMeasurement method to construct a point with
|
||||
* method chaining.
|
||||
**/
|
||||
point2 := influxdb3.NewPointWithMeasurement("home").
|
||||
SetTag("room", "Living Room").
|
||||
SetField("temp", 23.5).
|
||||
SetField("hum", 38.0).
|
||||
SetField("co", 16i).
|
||||
SetTimestamp(time.Now())
|
||||
|
||||
fmt.Println("Writing points")
|
||||
points := []*influxdb3.Point{point, point2}
|
||||
|
||||
/** Write points to InfluxDB.
|
||||
* You can specify WriteOptions, such as Gzip threshold,
|
||||
* default tags, and timestamp precision. Default precision is lineprotocol.Nanosecond
|
||||
**/
|
||||
err = client.WritePoints(context.Background(), points,
|
||||
influxdb3.WithPrecision(lineprotocol.Second))
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
Write()
|
||||
}
|
||||
```
|
||||
|
||||
1. To run the module and write the data to your {{% product-name %}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
go run main.go
|
||||
```
|
||||
|
||||
<!-- END GO SAMPLE -->
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN NODE.JS SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-points.js`.
|
||||
|
||||
1. In `write-points.js`, enter the following sample code:
|
||||
|
||||
```js
|
||||
// write-points.js
|
||||
import { InfluxDBClient, Point } from '@influxdata/influxdb3-client';
|
||||
|
||||
/**
|
||||
* Set InfluxDB credentials.
|
||||
*/
|
||||
const host = process.env.INFLUX_HOST ?? '';
|
||||
const database = process.env.INFLUX_DATABASE;
|
||||
const token = process.env.INFLUX_TOKEN;
|
||||
|
||||
/**
|
||||
* Write line protocol to InfluxDB using the JavaScript client library.
|
||||
*/
|
||||
export async function writePoints() {
|
||||
/**
|
||||
* Instantiate an InfluxDBClient.
|
||||
* Provide the host URL and the database token.
|
||||
*/
|
||||
const client = new InfluxDBClient({ host, token });
|
||||
|
||||
/** Use the fluent interface with chained methods to construct Points. */
|
||||
const point = Point.measurement('home')
|
||||
.setTag('room', 'Living Room')
|
||||
.setFloatField('temp', 22.2)
|
||||
.setFloatField('hum', 35.5)
|
||||
.setIntegerField('co', 7)
|
||||
.setTimestamp(new Date().getTime() / 1000);
|
||||
|
||||
const point2 = Point.measurement('home')
|
||||
.setTag('room', 'Kitchen')
|
||||
.setFloatField('temp', 21.0)
|
||||
.setFloatField('hum', 35.9)
|
||||
.setIntegerField('co', 0)
|
||||
.setTimestamp(new Date().getTime() / 1000);
|
||||
|
||||
/** Write points to InfluxDB.
|
||||
* The write method accepts an array of points, the target database, and
|
||||
* an optional configuration object.
|
||||
* You can specify WriteOptions, such as Gzip threshold, default tags,
|
||||
* and timestamp precision. Default precision is lineprotocol.Nanosecond
|
||||
**/
|
||||
|
||||
try {
|
||||
await client.write([point, point2], database, '', { precision: 's' });
|
||||
console.log('Data has been written successfully!');
|
||||
} catch (error) {
|
||||
console.error(`Error writing data to InfluxDB: ${error.body}`);
|
||||
}
|
||||
|
||||
client.close();
|
||||
}
|
||||
|
||||
writePoints();
|
||||
```
|
||||
|
||||
1. To run the module and write the data to your {{< product-name >}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
node write-points.js
|
||||
```
|
||||
|
||||
<!-- END NODE.JS SAMPLE -->
|
||||
|
||||
{{% /tab-content %}} {{% tab-content %}}
|
||||
|
||||
<!-- BEGIN PYTHON SETUP SAMPLE -->
|
||||
|
||||
1. Create a file for your module--for example: `write-points.py`.
|
||||
|
||||
1. In `write-points.py`, enter the following sample code to write data in
|
||||
batching mode:
|
||||
|
||||
```python
|
||||
import os
|
||||
from influxdb_client_3 import (
|
||||
InfluxDBClient3, InfluxDBError, Point, WritePrecision,
|
||||
WriteOptions, write_client_options)
|
||||
|
||||
host = os.getenv('INFLUX_HOST')
|
||||
token = os.getenv('INFLUX_TOKEN')
|
||||
database = os.getenv('INFLUX_DATABASE')
|
||||
|
||||
# Create an array of points with tags and fields.
|
||||
points = [Point("home")
|
||||
.tag("room", "Kitchen")
|
||||
.field("temp", 25.3)
|
||||
.field('hum', 20.2)
|
||||
.field('co', 9)]
|
||||
|
||||
# With batching mode, define callbacks to execute after a successful or
|
||||
# failed write request.
|
||||
# Callback methods receive the configuration and data sent in the request.
|
||||
def success(self, data: str):
|
||||
print(f"Successfully wrote batch: data: {data}")
|
||||
|
||||
def error(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed writing batch: config: {self}, data: {data} due: {exception}")
|
||||
|
||||
def retry(self, data: str, exception: InfluxDBError):
|
||||
print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}")
|
||||
|
||||
# Configure options for batch writing.
|
||||
write_options = WriteOptions(batch_size=500,
|
||||
flush_interval=10_000,
|
||||
jitter_interval=2_000,
|
||||
retry_interval=5_000,
|
||||
max_retries=5,
|
||||
max_retry_delay=30_000,
|
||||
exponential_base=2)
|
||||
|
||||
# Create an options dict that sets callbacks and WriteOptions.
|
||||
wco = write_client_options(success_callback=success,
|
||||
error_callback=error,
|
||||
retry_callback=retry,
|
||||
write_options=write_options)
|
||||
|
||||
# Instantiate a synchronous instance of the client with your
|
||||
# InfluxDB credentials and write options, such as Gzip threshold, default tags,
|
||||
# and timestamp precision. Default precision is nanosecond ('ns').
|
||||
with InfluxDBClient3(host=host,
|
||||
token=token,
|
||||
database=database,
|
||||
write_client_options=wco) as client:
|
||||
|
||||
client.write(points, write_precision='s')
|
||||
```
|
||||
|
||||
1. To run the module and write the data to your {{< product-name >}} database,
|
||||
enter the following command in your terminal:
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```sh
|
||||
python write-points.py
|
||||
```
|
||||
|
||||
<!-- END PYTHON SETUP PROJECT -->
|
||||
|
||||
{{% /tab-content %}} {{< /tabs-wrapper >}}
|
||||
|
||||
The sample code does the following:
|
||||
|
||||
<!-- vale InfluxDataDocs.v3Schema = NO -->
|
||||
|
||||
1. Instantiates a client configured with the InfluxDB URL and API token.
|
||||
1. Constructs `home`
|
||||
[measurement](/influxdb/clustered/reference/glossary/#measurement)
|
||||
`Point` objects.
|
||||
1. Sends data as line protocol format to InfluxDB and waits for the response.
|
||||
1. If the write succeeds, logs the success message to stdout; otherwise, logs
|
||||
the failure message and error details.
|
||||
1. Closes the client to release resources.
|
||||
|
||||
<!-- vale InfluxDataDocs.v3Schema = YES -->
|
19
package.json
19
package.json
|
@ -5,7 +5,7 @@
|
|||
"description": "InfluxDB documentation",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@vvago/vale": "^3.0.7",
|
||||
"@vvago/vale": "^3.4.2",
|
||||
"autoprefixer": ">=10.2.5",
|
||||
"hugo-extended": ">=0.101.0",
|
||||
"husky": "^9.0.11",
|
||||
|
@ -20,13 +20,14 @@
|
|||
},
|
||||
"scripts": {
|
||||
"prepare": "husky",
|
||||
"test": "./test.sh"
|
||||
"lint-vale": ".ci/vale/vale.sh",
|
||||
"lint-staged": "lint-staged --relative"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.{js,css,md}": "prettier --write",
|
||||
"content/influxdb/cloud-dedicated/**/*.md": "npx vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error --output=line",
|
||||
"content/influxdb/cloud-serverless/**/*.md": "npx vale --config=content/influxdb/cloud-serverless/.vale.ini --minAlertLevel=error --output=line",
|
||||
"content/influxdb/clustered/**/*.md": "npx vale --config=content/influxdb/clustered/.vale.ini --minAlertLevel=error --output=line",
|
||||
"content/influxdb/{cloud,v2,telegraf}/**/*.md": "npx vale --config=.vale.ini --minAlertLevel=error --output=line"
|
||||
}
|
||||
"main": "index.js",
|
||||
"module": "main.js",
|
||||
"directories": {
|
||||
"test": "test"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": ""
|
||||
}
|
||||
|
|
|
@ -1,94 +0,0 @@
|
|||
# If you need more help, visit the Dockerfile reference guide at
|
||||
# https://docs.docker.com/engine/reference/builder/
|
||||
|
||||
# Starting from a Go base image is easier than setting up the Go environment later.
|
||||
FROM golang:latest
|
||||
|
||||
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
||||
curl \
|
||||
git \
|
||||
gpg \
|
||||
jq \
|
||||
maven \
|
||||
nodejs \
|
||||
npm \
|
||||
wget
|
||||
|
||||
# Install test runner dependencies
|
||||
RUN apt-get install -y \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-venv
|
||||
|
||||
RUN ln -s /usr/bin/python3 /usr/bin/python
|
||||
|
||||
# Create a virtual environment for Python to avoid conflicts with the system Python and having to use the --break-system-packages flag when installing packages with pip.
|
||||
RUN python -m venv /opt/venv
|
||||
# Enable venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
|
||||
# Prevents Python from writing pyc files.
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# the application crashes without emitting any logs due to buffering.
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
# RUN --mount=type=cache,target=/root/.cache/node_modules \
|
||||
# --mount=type=bind,source=package.json,target=package.json \
|
||||
# npm install
|
||||
|
||||
# Copy docs test directory to the image.
|
||||
WORKDIR /usr/src/app
|
||||
RUN chmod -R 755 .
|
||||
|
||||
ARG SOURCE_DIR
|
||||
|
||||
COPY data ./data
|
||||
# Install parse_yaml.sh and parse YAML config files into dotenv files to be used by tests.
|
||||
RUN /bin/bash -c 'curl -sO https://raw.githubusercontent.com/mrbaseman/parse_yaml/master/src/parse_yaml.sh'
|
||||
RUN /bin/bash -c 'source ./parse_yaml.sh && parse_yaml ./data/products.yml > .env.products'
|
||||
|
||||
COPY test ./test
|
||||
WORKDIR /usr/src/app/test
|
||||
COPY shared/fixtures ./tmp/data
|
||||
|
||||
# Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't
|
||||
# available as packages in apt-cache, so use pip to download dependencies in a separate step and use Docker's caching.
|
||||
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
|
||||
# Leverage a bind mount to requirements.txt to avoid having to copy them into
|
||||
# this layer.
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
--mount=type=bind,source=test/requirements.txt,target=requirements.txt \
|
||||
pip install -Ur requirements.txt
|
||||
|
||||
COPY test/setup/run-tests.sh /usr/local/bin/run-tests.sh
|
||||
RUN chmod +x /usr/local/bin/run-tests.sh
|
||||
|
||||
# Install Telegraf for use in tests.
|
||||
# Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker).
|
||||
# influxdata-archive_compat.key GPG fingerprint:
|
||||
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
|
||||
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key
|
||||
|
||||
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
|
||||
|
||||
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
|
||||
|
||||
RUN apt-get update && apt-get install telegraf
|
||||
|
||||
# Install influx v2 Cloud CLI for use in tests.
|
||||
# Follow the install instructions(https://portal.influxdata.com/downloads/), except for sudo (which isn't available in Docker).
|
||||
# influxdata-archive_compat.key GPG fingerprint:
|
||||
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
|
||||
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key
|
||||
|
||||
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
|
||||
|
||||
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
|
||||
|
||||
RUN apt-get update && apt-get install influxdb2-cli
|
||||
|
||||
ENV TEMP_DIR=./tmp
|
||||
|
||||
ENTRYPOINT [ "run-tests.sh" ]
|
||||
CMD [""]
|
66
test.sh
66
test.sh
|
@ -1,66 +0,0 @@
|
|||
#! /bin/bash
|
||||
|
||||
# Path: test.sh
|
||||
# Description:
|
||||
# This script is used to copy content files for testing and to run tests on tests on those temporary copies.
|
||||
# The temporary files are shared between the host and the Docker container
|
||||
# using a bind mount configured in compose.yaml.
|
||||
#
|
||||
# Docker compose now has an experimental file watch feature
|
||||
# (https://docs.docker.com/compose/file-watch/) that is likely preferable to the
|
||||
# strategy here.
|
||||
#
|
||||
# Usage:
|
||||
# The default behavior is to test all *.md files that have been added or modified in the current branch, effectively:
|
||||
#
|
||||
# `git diff --name-only --diff-filter=AM --relative master | grep -E '\.md$' | ./test.sh`
|
||||
#
|
||||
# To specify files to test, in your terminal command line, pass a file pattern as the only argument to the script--for example:
|
||||
#
|
||||
# sh test.sh ./content/**/*.md
|
||||
##
|
||||
|
||||
paths="$1"
|
||||
target=./test/tmp
|
||||
testrun=./test/.test-run.txt
|
||||
mkdir -p "$target"
|
||||
cat /dev/null > "$testrun"
|
||||
rm -rf "$target"/*
|
||||
|
||||
# Check if the user provided a path to copy.
|
||||
if [ -z "$paths" ]; then
|
||||
echo "No path provided. Running tests for *.md files that have been added or modified in the current branch."
|
||||
paths=$(git diff --name-only --diff-filter=AM HEAD | \
|
||||
grep -E '\.md$')
|
||||
|
||||
if [ -z "$paths" ]; then
|
||||
echo "No files found for pattern: $paths"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
paths=$(find "$paths" -type f -name '*.md')
|
||||
fi
|
||||
|
||||
# Log the list of files to be tested and copy them to the test directory.
|
||||
echo "$paths" >> "$testrun"
|
||||
echo "$paths" | rsync -arv --files-from=- . "$target"
|
||||
|
||||
# Build or rebuild a service if the Dockerfile or build directory have changed, and then run the tests.
|
||||
docker compose up test
|
||||
|
||||
# Troubleshoot tests
|
||||
# If you want to examine files or run commands for debugging tests,
|
||||
# start the container and use `exec` to open an interactive shell--for example:
|
||||
|
||||
# docker compose run -it --entrypoint=/bin/bash test
|
||||
|
||||
# To build and run a new container and debug test failures, use `docker compose run` which runs a one-off command in a new container. Pass additional flags to be used by the container's entrypoint and the test runners it executes--for example:
|
||||
|
||||
# docker compose run --rm test -v
|
||||
# docker compose run --rm test --entrypoint /bin/bash
|
||||
|
||||
# Or, pass the flags in the compose file--for example:
|
||||
# services:
|
||||
# test:
|
||||
# build:...
|
||||
# command: ["-vv"]
|
|
@ -8,9 +8,10 @@
|
|||
**/__pycache__
|
||||
**/.venv
|
||||
**/.classpath
|
||||
**/.config.toml
|
||||
**/.dockerignore
|
||||
**/.env
|
||||
**/.env.influxdbv3
|
||||
**/.env.*
|
||||
**/.git
|
||||
**/.gitignore
|
||||
**/.project
|
||||
|
@ -23,6 +24,7 @@
|
|||
**/*.jfm
|
||||
**/bin
|
||||
**/charts
|
||||
**/config.toml
|
||||
**/docker-compose*
|
||||
**/compose*
|
||||
**/Dockerfile*
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
/target
|
||||
/Cargo.lock
|
||||
config.toml
|
||||
content
|
||||
node_modules
|
||||
tmp
|
||||
.config*
|
||||
.env*
|
||||
**/.env.test
|
||||
.pytest_cache
|
||||
.test-run.txt
|
||||
|
|
|
@ -1,116 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script is used to run tests for the InfluxDB documentation.
|
||||
# The script is designed to be run in a Docker container. It is used to substitute placeholder values.
|
||||
|
||||
# Function to check if an option is present in the arguments
|
||||
has_option() {
|
||||
local target="$1"
|
||||
shift
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" == "$target" ]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
verbose=0
|
||||
# Check if "--option" is present in the CMD arguments
|
||||
if has_option "-v" "$@"; then
|
||||
verbose=1
|
||||
echo "Using verbose mode..."
|
||||
fi
|
||||
|
||||
BASE_DIR=$(pwd)
|
||||
cd $TEMP_DIR
|
||||
|
||||
for file in `find . -type f \( -iname '*.md' \)` ; do
|
||||
if [ -f "$file" ]; then
|
||||
echo "PRETEST: substituting values in $file"
|
||||
|
||||
# Replaces placeholder values with environment variable references.
|
||||
|
||||
# Non-language-specific replacements.
|
||||
sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g;
|
||||
' $file
|
||||
|
||||
# Python-specific replacements.
|
||||
# Use f-strings to identify placeholders in Python while also keeping valid syntax if
|
||||
# the user replaces the value.
|
||||
# Remember to import os for your example code.
|
||||
sed -i 's/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
|
||||
s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
|
||||
s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g;
|
||||
s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g;
|
||||
s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g;
|
||||
s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g;
|
||||
' $file
|
||||
|
||||
# Shell-specific replacements.
|
||||
## In JSON Heredoc
|
||||
sed -i 's|"orgID": "ORG_ID"|"orgID": "$INFLUX_ORG"|g;
|
||||
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \
|
||||
$file
|
||||
|
||||
sed -i 's/API_TOKEN/$INFLUX_TOKEN/g;
|
||||
s/ORG_ID/$INFLUX_ORG/g;
|
||||
s/DATABASE_TOKEN/$INFLUX_TOKEN/g;
|
||||
s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g;
|
||||
s/BUCKET_NAME/$INFLUX_DATABASE/g;
|
||||
s/DATABASE_NAME/$INFLUX_DATABASE/g;
|
||||
s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g;
|
||||
s/get-started/$INFLUX_DATABASE/g;
|
||||
s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
|
||||
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \
|
||||
$file
|
||||
|
||||
# v2-specific replacements.
|
||||
sed -i 's|https:\/\/us-west-2-1.aws.cloud2.influxdata.com|$INFLUX_HOST|g;
|
||||
s|{{< latest-patch >}}|${influxdb_latest_patches_v2}|g;
|
||||
s|{{< latest-patch cli=true >}}|${influxdb_latest_cli_v2}|g;' \
|
||||
$file
|
||||
|
||||
# Skip package manager commands.
|
||||
sed -i 's|sudo dpkg.*$||g;
|
||||
s|sudo yum.*$||g;' \
|
||||
$file
|
||||
|
||||
# Environment-specific replacements.
|
||||
sed -i 's|sudo ||g;' \
|
||||
$file
|
||||
fi
|
||||
if [ $verbose -eq 1 ]; then
|
||||
echo "FILE CONTENTS:"
|
||||
cat $file
|
||||
fi
|
||||
done
|
||||
|
||||
# Miscellaneous test setup.
|
||||
# For macOS samples.
|
||||
mkdir -p ~/Downloads && rm -rf ~/Downloads/*
|
||||
# Clean up installed files from previous runs.
|
||||
gpg -q --batch --yes --delete-key D8FF8E1F7DF8B07E > /dev/null 2>&1
|
||||
|
||||
# Activate the Python virtual environment configured in the Dockerfile.
|
||||
. /opt/venv/bin/activate
|
||||
|
||||
# List installed Python dependencies.
|
||||
pip list
|
||||
|
||||
# Run test commands with options provided in the CMD of the Dockerfile.
|
||||
# pytest rootdir is the directory where pytest.ini is located (/test).
|
||||
if [ -d ./content/influxdb/cloud-dedicated/ ]; then
|
||||
echo "Running content/influxdb/cloud-dedicated tests..."
|
||||
pytest --codeblocks --envfile $BASE_DIR/.env.dedicated ./content/influxdb/cloud-dedicated/ $@
|
||||
fi
|
||||
|
||||
if [ -d ./content/influxdb/cloud-serverless/ ]; then
|
||||
echo "Running content/influxdb/cloud-serverless tests..."
|
||||
pytest --codeblocks --envfile $BASE_DIR/.env.serverless ./content/influxdb/cloud-serverless/ $@
|
||||
fi
|
||||
|
||||
if [ -d ./content/telegraf/ ]; then
|
||||
echo "Running content/telegraf tests..."
|
||||
pytest --codeblocks --envfile $BASE_DIR/.env.telegraf ./content/telegraf/ $@
|
||||
fi
|
|
@ -0,0 +1,105 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script is used to run tests for the InfluxDB documentation.
|
||||
# The script is designed to be run in a Docker container. It is used to substitute placeholder values in test files.
|
||||
|
||||
TEST_CONTENT="/app/content"
|
||||
|
||||
function substitute_placeholders {
|
||||
for file in `find "$TEST_CONTENT" -type f \( -iname '*.md' \)`; do
|
||||
if [ -f "$file" ]; then
|
||||
# echo "PRETEST: substituting values in $file"
|
||||
|
||||
# Replaces placeholder values with environment variable references.
|
||||
|
||||
# Non-language-specific replacements.
|
||||
sed -i 's|https:\/\/{{< influxdb/host >}}|$INFLUX_HOST|g;
|
||||
' $file
|
||||
|
||||
# Python-specific replacements.
|
||||
# Use f-strings to identify placeholders in Python while also keeping valid syntax if
|
||||
# the user replaces the value.
|
||||
# Remember to import os for your example code.
|
||||
sed -i 's/f"DATABASE_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
|
||||
s/f"API_TOKEN"/os.getenv("INFLUX_TOKEN")/g;
|
||||
s/f"BUCKET_NAME"/os.getenv("INFLUX_DATABASE")/g;
|
||||
s/f"DATABASE_NAME"/os.getenv("INFLUX_DATABASE")/g;
|
||||
s|f"{{< influxdb/host >}}"|os.getenv("INFLUX_HOSTNAME")|g;
|
||||
s|f"RETENTION_POLICY_NAME\|RETENTION_POLICY"|"autogen"|g;
|
||||
' $file
|
||||
|
||||
# Shell-specific replacements.
|
||||
## In JSON Heredoc
|
||||
sed -i 's|"orgID": "ORG_ID"|"orgID": "$INFLUX_ORG"|g;
|
||||
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \
|
||||
$file
|
||||
|
||||
sed -i 's/API_TOKEN/$INFLUX_TOKEN/g;
|
||||
s/ORG_ID/$INFLUX_ORG/g;
|
||||
s/DATABASE_TOKEN/$INFLUX_TOKEN/g;
|
||||
s/--bucket-id BUCKET_ID/--bucket-id $INFLUX_BUCKET_ID/g;
|
||||
s/BUCKET_NAME/$INFLUX_DATABASE/g;
|
||||
s/DATABASE_NAME/$INFLUX_DATABASE/g;
|
||||
s/--id DBRP_ID/--id $INFLUX_DBRP_ID/g;
|
||||
s/get-started/$INFLUX_DATABASE/g;
|
||||
s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
|
||||
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;' \
|
||||
$file
|
||||
|
||||
# v2-specific replacements.
|
||||
sed -i 's|https:\/\/us-west-2-1.aws.cloud2.influxdata.com|$INFLUX_HOST|g;
|
||||
s|{{< latest-patch >}}|${influxdb_latest_patches_v2}|g;
|
||||
s|{{< latest-patch cli=true >}}|${influxdb_latest_cli_v2}|g;' \
|
||||
$file
|
||||
|
||||
# Skip package manager commands.
|
||||
sed -i 's|sudo dpkg.*$||g;
|
||||
s|sudo yum.*$||g;' \
|
||||
$file
|
||||
|
||||
# Environment-specific replacements.
|
||||
sed -i 's|sudo ||g;' \
|
||||
$file
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
setup() {
|
||||
# Parse YAML config files into dotenv files to be used by tests.
|
||||
parse_yaml /app/appdata/products.yml > /app/appdata/.env.products
|
||||
|
||||
# Miscellaneous test setup.
|
||||
# For macOS samples.
|
||||
mkdir -p ~/Downloads && rm -rf ~/Downloads/*
|
||||
}
|
||||
|
||||
prepare_tests() {
|
||||
TEST_FILES="$*"
|
||||
|
||||
# Remove files from the previous run.
|
||||
rm -rf "$TEST_CONTENT"/*
|
||||
# Copy the test files to the target directory while preserving the directory structure.
|
||||
for FILE in $TEST_FILES; do
|
||||
# Create the parent directories of the destination file
|
||||
#mkdir -p "$(dirname "$TEST_TARGET/$FILE")"
|
||||
# Copy the file
|
||||
rsync -avz --relative --log-file=./test.log "$FILE" /app/
|
||||
done
|
||||
|
||||
substitute_placeholders
|
||||
}
|
||||
|
||||
# If arguments were passed and the first argument is not --files, run the command. This is useful for running "/bin/bash" for debugging the container.
|
||||
# If --files is passed, prepare all remaining arguments as test files.
|
||||
# Otherwise (no arguments), run the setup function and return existing files to be tested.
|
||||
if [ "$1" != "--files" ]; then
|
||||
echo "Executing $0 without --files argument."
|
||||
"$@"
|
||||
fi
|
||||
if [ "$1" == "--files" ]; then
|
||||
shift
|
||||
prepare_tests "$@"
|
||||
fi
|
||||
setup
|
||||
# Return new or existing files to be tested.
|
||||
find "$TEST_CONTENT" -type f -name '*.md'
|
Loading…
Reference in New Issue