Merge branch 'master' into gw-ldaps-docs
commit
111faf9bca
|
|
@ -1,14 +1,12 @@
|
|||
FROM registry.gitlab.com/pipeline-components/remark-lint:latest
|
||||
|
||||
|
||||
WORKDIR /app/
|
||||
|
||||
# Generic
|
||||
#RUN apk add --no-cache
|
||||
COPY /.ci/remark-lint /app/
|
||||
|
||||
# Node
|
||||
ENV PATH "$PATH:/app/node_modules/.bin/"
|
||||
ENV PATH="$PATH:/app/node_modules/.bin/"
|
||||
RUN yarn install && yarn cache clean
|
||||
ENV NODE_PATH=/app/node_modules/
|
||||
RUN ln -nfs /app/node_modules /node_modules
|
||||
|
|
@ -23,3 +21,6 @@ LABEL \
|
|||
org.label-schema.name="Remark-lint" \
|
||||
org.label-schema.schema-version="1.0" \
|
||||
org.label-schema.url="https://pipeline-components.gitlab.io/"
|
||||
|
||||
ENTRYPOINT [ "remark" ]
|
||||
CMD [ "" ]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,25 @@
|
|||
import remarkPresetLintConsistent from 'remark-preset-lint-consistent';
|
||||
import remarkPresetLintRecommended from 'remark-preset-lint-recommended';
|
||||
import remarkPresetLintMarkdownStyleGuide from 'remark-preset-lint-markdown-style-guide';
|
||||
import remarkFrontmatter from 'remark-frontmatter';
|
||||
import remarkFrontmatterSchema from 'remark-lint-frontmatter-schema';
|
||||
import remarkNoShellDollars from 'remark-lint-no-shell-dollars';
|
||||
import remarkToc from 'remark-toc';
|
||||
|
||||
const remarkConfig = {
|
||||
settings: {
|
||||
bullet: '-',
|
||||
plugins: [
|
||||
remarkPresetLintConsistent,
|
||||
remarkPresetLintRecommended,
|
||||
remarkPresetLintMarkdownStyleGuide,
|
||||
remarkFrontmatter,
|
||||
remarkFrontmatterSchema,
|
||||
remarkNoShellDollars,
|
||||
// Generate a table of contents in `## Contents`
|
||||
[remarkToc, { heading: '' }],
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
export default remarkConfig;
|
||||
|
|
@ -435,9 +435,9 @@ concat-stream@^2.0.0:
|
|||
typedarray "^0.0.6"
|
||||
|
||||
cross-spawn@^7.0.0:
|
||||
version "7.0.3"
|
||||
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
|
||||
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
|
||||
version "7.0.6"
|
||||
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
|
||||
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
|
||||
dependencies:
|
||||
path-key "^3.1.0"
|
||||
shebang-command "^2.0.0"
|
||||
|
|
|
|||
|
|
@ -5,3 +5,9 @@ level: error
|
|||
nonword: true
|
||||
tokens:
|
||||
- \d+(?:st|nd|rd|th)
|
||||
- tenth
|
||||
|
||||
exceptions:
|
||||
- 0th
|
||||
- 50th
|
||||
- 90th
|
||||
|
|
|
|||
|
|
@ -5,5 +5,5 @@ nonword: true
|
|||
level: error
|
||||
tokens:
|
||||
- \b\d+(?:B|kB|MB|GB|TB)
|
||||
# Ignore duration literals in code blocks.
|
||||
- \b(?!\`)\d+(?:ns|ms|s|min|h|d)
|
||||
# Match time units, but not duration literals inside code blocks.
|
||||
- \b`(\n)?\d+(ns|ms|s|min|h|d)`\b
|
||||
|
|
|
|||
|
|
@ -128,6 +128,7 @@ left
|
|||
level
|
||||
like
|
||||
local
|
||||
locf
|
||||
lower
|
||||
match
|
||||
max
|
||||
|
|
|
|||
|
|
@ -55,7 +55,6 @@ swap:
|
|||
fewer data: less data
|
||||
file name: filename
|
||||
firewalls: firewall rules
|
||||
fully qualified: fully-qualified
|
||||
functionality: capability|feature
|
||||
Google account: Google Account
|
||||
Google accounts: Google Accounts
|
||||
|
|
|
|||
|
|
@ -68,10 +68,10 @@ influx3
|
|||
influxctl
|
||||
influxd
|
||||
influxdata.com
|
||||
iox
|
||||
(iox|IOx)
|
||||
keep-url
|
||||
lat
|
||||
locf
|
||||
(locf|LOCF)
|
||||
logicalplan
|
||||
noaa|NOAA
|
||||
npm|NPM
|
||||
|
|
|
|||
|
|
@ -48,9 +48,6 @@ call_lefthook()
|
|||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif command -v npx >/dev/null 2>&1
|
||||
then
|
||||
npx lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -48,9 +48,6 @@ call_lefthook()
|
|||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif command -v npx >/dev/null 2>&1
|
||||
then
|
||||
npx lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -48,9 +48,6 @@ call_lefthook()
|
|||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif command -v npx >/dev/null 2>&1
|
||||
then
|
||||
npx lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -48,9 +48,6 @@ call_lefthook()
|
|||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif command -v npx >/dev/null 2>&1
|
||||
then
|
||||
npx lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -57,6 +57,17 @@ Install [Docker](https://docs.docker.com/get-docker/) for your system.
|
|||
|
||||
docs-v2 includes Docker configurations (`compose.yaml` and Dockerfiles) for running the Vale style linter and tests for code blocks (Shell, Bash, and Python) in Markdown files.
|
||||
|
||||
#### Build the test dependency image
|
||||
|
||||
After you have installed Docker, run the following command to build the test
|
||||
dependency image, `influxdata:docs-pytest`.
|
||||
The tests defined in `compose.yaml` use the dependencies and execution
|
||||
environment from this image.
|
||||
|
||||
```bash
|
||||
docker build -t influxdata:docs-pytest -f Dockerfile.pytest .
|
||||
```
|
||||
|
||||
### Run the documentation locally (optional)
|
||||
|
||||
To run the documentation locally, follow the instructions provided in the README.
|
||||
|
|
|
|||
|
|
@ -10,8 +10,12 @@ RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influ
|
|||
|
||||
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
|
||||
|
||||
# Install InfluxDB clients to use in tests.
|
||||
# Install depedencies for clients and tests.
|
||||
# - InfluxData clients to use in tests.
|
||||
# - apt-utils for verification tools
|
||||
# - perl for shasum (default on MacOS)
|
||||
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
||||
apt-utils \
|
||||
curl \
|
||||
git \
|
||||
gpg \
|
||||
|
|
@ -22,6 +26,7 @@ RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
|||
maven \
|
||||
nodejs \
|
||||
npm \
|
||||
perl \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-venv \
|
||||
|
|
@ -52,7 +57,7 @@ COPY ./test/pytest/pytest.ini pytest.ini
|
|||
# Python and Pytest dependencies.
|
||||
COPY ./test/pytest/requirements.txt requirements.txt
|
||||
# Pytest fixtures.
|
||||
COPY ./test/pytest/conftest.py conftest.py
|
||||
COPY ./test/pytest/conftest.py conftest.py
|
||||
RUN pip install -Ur requirements.txt
|
||||
|
||||
# Activate the Python virtual environment configured in the Dockerfile.
|
||||
|
|
@ -70,6 +75,7 @@ RUN service influxdb start
|
|||
|
||||
# Copy test scripts and make them executable.
|
||||
COPY --chmod=755 ./test/scripts/parse_yaml.sh /usr/local/bin/parse_yaml
|
||||
COPY --chmod=755 ./test/scripts/get-container-info.sh /usr/local/bin/get-container-info
|
||||
|
||||
ENTRYPOINT [ "pytest" ]
|
||||
|
||||
|
|
|
|||
|
|
@ -240,6 +240,83 @@ paths:
|
|||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
/ping:
|
||||
get:
|
||||
description: |
|
||||
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
|
||||
|
||||
The response is a HTTP `204` status code to inform you the querier is available.
|
||||
|
||||
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters.
|
||||
|
||||
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
|
||||
|
||||
This endpoint doesn't require authentication.
|
||||
operationId: GetPing
|
||||
responses:
|
||||
'204':
|
||||
description: |
|
||||
Success--the querier is available.
|
||||
Headers contain InfluxDB version information.
|
||||
headers:
|
||||
X-Influxdb-Build:
|
||||
description: |
|
||||
The type of InfluxDB build.
|
||||
schema:
|
||||
type: string
|
||||
X-Influxdb-Version:
|
||||
description: |
|
||||
The version of InfluxDB.
|
||||
schema:
|
||||
type: integer
|
||||
4xx:
|
||||
description: |
|
||||
#### InfluxDB Cloud
|
||||
- Doesn't return this error.
|
||||
security:
|
||||
- {}
|
||||
servers: []
|
||||
summary: Get the status of the instance
|
||||
tags:
|
||||
- Ping
|
||||
head:
|
||||
description: |
|
||||
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
|
||||
|
||||
The response is a HTTP `204` status code to inform you the querier is available.
|
||||
|
||||
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters.
|
||||
|
||||
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
|
||||
|
||||
This endpoint doesn't require authentication.
|
||||
|
||||
operationId: HeadPing
|
||||
responses:
|
||||
'204':
|
||||
description: |
|
||||
Success--the querier is available.
|
||||
Headers contain InfluxDB version information.
|
||||
headers:
|
||||
X-Influxdb-Build:
|
||||
description: The type of InfluxDB build.
|
||||
schema:
|
||||
type: string
|
||||
X-Influxdb-Version:
|
||||
description: |
|
||||
The version of InfluxDB.
|
||||
schema:
|
||||
type: integer
|
||||
4xx:
|
||||
description: |
|
||||
#### InfluxDB Cloud
|
||||
- Doesn't return this error.
|
||||
security:
|
||||
- {}
|
||||
servers: []
|
||||
summary: Get the status of the instance
|
||||
tags:
|
||||
- Ping
|
||||
components:
|
||||
parameters:
|
||||
TraceSpan:
|
||||
|
|
|
|||
|
|
@ -149,17 +149,20 @@ paths:
|
|||
/ping:
|
||||
get:
|
||||
description: |
|
||||
Retrieves the status and InfluxDB version of the instance.
|
||||
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
|
||||
|
||||
The response is a HTTP `204` status code to inform you the querier is available.
|
||||
|
||||
Use this endpoint to monitor uptime for the InfluxDB instance. The response
|
||||
returns a HTTP `204` status code to inform you the instance is available.
|
||||
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters.
|
||||
|
||||
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
|
||||
|
||||
This endpoint doesn't require authentication.
|
||||
operationId: GetPing
|
||||
responses:
|
||||
'204':
|
||||
description: |
|
||||
Success.
|
||||
Success--the querier is available.
|
||||
Headers contain InfluxDB version information.
|
||||
headers:
|
||||
X-Influxdb-Build:
|
||||
|
|
@ -184,17 +187,20 @@ paths:
|
|||
- Ping
|
||||
head:
|
||||
description: |
|
||||
Returns the status and InfluxDB version of the instance.
|
||||
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
|
||||
|
||||
The response is a HTTP `204` status code to inform you the querier is available.
|
||||
|
||||
Use this endpoint to monitor uptime for the InfluxDB instance. The response
|
||||
returns a HTTP `204` status code to inform you the instance is available.
|
||||
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters.
|
||||
|
||||
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
|
||||
|
||||
This endpoint doesn't require authentication.
|
||||
operationId: HeadPing
|
||||
responses:
|
||||
'204':
|
||||
description: |
|
||||
Success.
|
||||
Success--the querier is available.
|
||||
Headers contain InfluxDB version information.
|
||||
headers:
|
||||
X-Influxdb-Build:
|
||||
|
|
@ -229,9 +235,8 @@ paths:
|
|||
2. If successful, attempts to [ingest data](/influxdb/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-dedicated/write-data/troubleshoot/#review-http-status-codes).
|
||||
3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
|
||||
|
||||
- `204 No Content`: all data in the batch is ingested
|
||||
- `201 Created` (_If the cluster is configured to allow **partial writes**_): some points in the batch are ingested and queryable, and some points are rejected
|
||||
- `400 Bad Request`: all data is rejected
|
||||
- `204 No Content`: All data in the batch is ingested.
|
||||
- `400 Bad Request`: Some (_when **partial writes** are configured for the cluster_) or all of the data has been rejected. Data that has not been rejected is ingested and queryable.
|
||||
|
||||
The response body contains error details about [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
|
|
@ -656,33 +661,29 @@ paths:
|
|||
description: Line protocol body
|
||||
required: true
|
||||
responses:
|
||||
'201':
|
||||
description: |
|
||||
Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. The response body contains details about the [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
partialWriteErrorWithRejectedPoints:
|
||||
summary: Partial write rejects points with syntax errors
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
schema:
|
||||
$ref: '#/components/schemas/LineProtocolError'
|
||||
'204':
|
||||
description: Success ("No Content"). All data in the batch is written and queryable.
|
||||
'400':
|
||||
description: Bad Request. All data in body was rejected and not written.
|
||||
description: |
|
||||
Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected.
|
||||
If a partial write occurred, then some points from the batch are written and queryable.
|
||||
|
||||
The response body contains details about the [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
rejectsAllPoints:
|
||||
rejectedAllPoints:
|
||||
summary: Rejected all points
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
message: 'no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
partialWriteErrorWithRejectedPoints:
|
||||
summary: Partial write rejects some points
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'partial write has occurred, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
schema:
|
||||
$ref: '#/components/schemas/LineProtocolError'
|
||||
'401':
|
||||
|
|
|
|||
|
|
@ -7491,9 +7491,8 @@ paths:
|
|||
2. If successful, attempts to [ingest data](/influxdb/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-serverless/write-data/troubleshoot/#review-http-status-codes).
|
||||
3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
|
||||
|
||||
- `204 No Content`: all data in the batch is ingested
|
||||
- `201 Created`: some points in the batch are ingested and queryable, and some points are rejected
|
||||
- `400 Bad Request`: all data is rejected
|
||||
- `204 No Content`: All data in the batch is ingested.
|
||||
- `400 Bad Request`: Data from the batch was rejected and not written. The response body indicates if a partial write occurred.
|
||||
|
||||
The response body contains error details about [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
|
|
@ -7639,27 +7638,14 @@ paths:
|
|||
- [Best practices for optimizing writes](/influxdb/cloud-serverless/write-data/best-practices/optimize-writes/)
|
||||
required: true
|
||||
responses:
|
||||
'201':
|
||||
description: |
|
||||
Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
partialWriteErrorWithRejectedPoints:
|
||||
summary: Partial write rejects points with syntax errors
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
schema:
|
||||
$ref: '#/components/schemas/LineProtocolError'
|
||||
'204':
|
||||
description: Success ("No Content"). All data in the batch is written and queryable.
|
||||
'400':
|
||||
description: |
|
||||
All data in the batch was rejected and not written.
|
||||
Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected.
|
||||
If a partial write occurred, then some points from the batch are written and queryable.
|
||||
|
||||
The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points).
|
||||
The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
|
|
@ -7668,7 +7654,13 @@ paths:
|
|||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
message: 'no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
partialWriteErrorWithRejectedPoints:
|
||||
summary: Partial write rejects some points
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'partial write has occurred, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
schema:
|
||||
$ref: '#/components/schemas/LineProtocolError'
|
||||
'401':
|
||||
|
|
@ -7704,14 +7696,7 @@ paths:
|
|||
The request payload is too large.
|
||||
InfluxDB rejected the batch and did not write any data.
|
||||
|
||||
InfluxDB returns this error if the payload exceeds the 50MB size limit.
|
||||
'422':
|
||||
description: |
|
||||
Unprocessable Entity.
|
||||
|
||||
The request contained data outside the bucket's retention period. InfluxDB rejected the batch and wrote no data.
|
||||
|
||||
The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points).
|
||||
InfluxDB returns this error if the payload exceeds the 50MB size limit or all data is outside the retention window.
|
||||
'429':
|
||||
description: |
|
||||
Too many requests.
|
||||
|
|
|
|||
|
|
@ -646,17 +646,33 @@ paths:
|
|||
'204':
|
||||
description: Write data is correctly formatted and accepted for writing to the database.
|
||||
'400':
|
||||
description: |
|
||||
Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected.
|
||||
If a partial write occurred, then some points from the batch are written and queryable.
|
||||
The response body contains details about the [rejected points](/influxdb/clustered/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
rejectedAllPoints:
|
||||
summary: Rejected all points
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
partialWriteErrorWithRejectedPoints:
|
||||
summary: Partial write rejects some points
|
||||
value:
|
||||
code: invalid
|
||||
line: 2
|
||||
message: 'partial write has occurred, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)'
|
||||
schema:
|
||||
$ref: '#/components/schemas/LineProtocolError'
|
||||
description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written.
|
||||
'401':
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
description: Token doesn't have sufficient permissions to write to this database or the database doesn't exist.
|
||||
description: Token doesn't have sufficient permissions to write to this database or the database doesn't exist.
|
||||
'403':
|
||||
content:
|
||||
application/json:
|
||||
|
|
|
|||
|
|
@ -1,165 +0,0 @@
|
|||
/*!
|
||||
* JavaScript Cookie v2.2.0
|
||||
* https://github.com/js-cookie/js-cookie
|
||||
*
|
||||
* Copyright 2006, 2015 Klaus Hartl & Fagner Brack
|
||||
* Released under the MIT license
|
||||
*/
|
||||
;(function (factory) {
|
||||
var registeredInModuleLoader = false;
|
||||
if (typeof define === 'function' && define.amd) {
|
||||
define(factory);
|
||||
registeredInModuleLoader = true;
|
||||
}
|
||||
if (typeof exports === 'object') {
|
||||
module.exports = factory();
|
||||
registeredInModuleLoader = true;
|
||||
}
|
||||
if (!registeredInModuleLoader) {
|
||||
var OldCookies = window.Cookies;
|
||||
var api = window.Cookies = factory();
|
||||
api.noConflict = function () {
|
||||
window.Cookies = OldCookies;
|
||||
return api;
|
||||
};
|
||||
}
|
||||
}(function () {
|
||||
function extend () {
|
||||
var i = 0;
|
||||
var result = {};
|
||||
for (; i < arguments.length; i++) {
|
||||
var attributes = arguments[ i ];
|
||||
for (var key in attributes) {
|
||||
result[key] = attributes[key];
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function init (converter) {
|
||||
function api (key, value, attributes) {
|
||||
var result;
|
||||
if (typeof document === 'undefined') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Write
|
||||
|
||||
if (arguments.length > 1) {
|
||||
attributes = extend({
|
||||
path: '/'
|
||||
}, api.defaults, attributes);
|
||||
|
||||
if (typeof attributes.expires === 'number') {
|
||||
var expires = new Date();
|
||||
expires.setMilliseconds(expires.getMilliseconds() + attributes.expires * 864e+5);
|
||||
attributes.expires = expires;
|
||||
}
|
||||
|
||||
// We're using "expires" because "max-age" is not supported by IE
|
||||
attributes.expires = attributes.expires ? attributes.expires.toUTCString() : '';
|
||||
|
||||
try {
|
||||
result = JSON.stringify(value);
|
||||
if (/^[\{\[]/.test(result)) {
|
||||
value = result;
|
||||
}
|
||||
} catch (e) {}
|
||||
|
||||
if (!converter.write) {
|
||||
value = encodeURIComponent(String(value))
|
||||
.replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g, decodeURIComponent);
|
||||
} else {
|
||||
value = converter.write(value, key);
|
||||
}
|
||||
|
||||
key = encodeURIComponent(String(key));
|
||||
key = key.replace(/%(23|24|26|2B|5E|60|7C)/g, decodeURIComponent);
|
||||
key = key.replace(/[\(\)]/g, escape);
|
||||
|
||||
var stringifiedAttributes = '';
|
||||
|
||||
for (var attributeName in attributes) {
|
||||
if (!attributes[attributeName]) {
|
||||
continue;
|
||||
}
|
||||
stringifiedAttributes += '; ' + attributeName;
|
||||
if (attributes[attributeName] === true) {
|
||||
continue;
|
||||
}
|
||||
stringifiedAttributes += '=' + attributes[attributeName];
|
||||
}
|
||||
return (document.cookie = key + '=' + value + stringifiedAttributes);
|
||||
}
|
||||
|
||||
// Read
|
||||
|
||||
if (!key) {
|
||||
result = {};
|
||||
}
|
||||
|
||||
// To prevent the for loop in the first place assign an empty array
|
||||
// in case there are no cookies at all. Also prevents odd result when
|
||||
// calling "get()"
|
||||
var cookies = document.cookie ? document.cookie.split('; ') : [];
|
||||
var rdecode = /(%[0-9A-Z]{2})+/g;
|
||||
var i = 0;
|
||||
|
||||
for (; i < cookies.length; i++) {
|
||||
var parts = cookies[i].split('=');
|
||||
var cookie = parts.slice(1).join('=');
|
||||
|
||||
if (!this.json && cookie.charAt(0) === '"') {
|
||||
cookie = cookie.slice(1, -1);
|
||||
}
|
||||
|
||||
try {
|
||||
var name = parts[0].replace(rdecode, decodeURIComponent);
|
||||
cookie = converter.read ?
|
||||
converter.read(cookie, name) : converter(cookie, name) ||
|
||||
cookie.replace(rdecode, decodeURIComponent);
|
||||
|
||||
if (this.json) {
|
||||
try {
|
||||
cookie = JSON.parse(cookie);
|
||||
} catch (e) {}
|
||||
}
|
||||
|
||||
if (key === name) {
|
||||
result = cookie;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!key) {
|
||||
result[name] = cookie;
|
||||
}
|
||||
} catch (e) {}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
api.set = api;
|
||||
api.get = function (key) {
|
||||
return api.call(api, key);
|
||||
};
|
||||
api.getJSON = function () {
|
||||
return api.apply({
|
||||
json: true
|
||||
}, [].slice.call(arguments));
|
||||
};
|
||||
api.defaults = {};
|
||||
|
||||
api.remove = function (key, attributes) {
|
||||
api(key, '', extend(attributes, {
|
||||
expires: -1
|
||||
}));
|
||||
};
|
||||
|
||||
api.withConverter = init;
|
||||
|
||||
return api;
|
||||
}
|
||||
|
||||
return init(function () {});
|
||||
}));
|
||||
|
|
@ -1,46 +1,41 @@
|
|||
/*
|
||||
This represents an API for managing cookies for the InfluxData documentation.
|
||||
It uses the Cookies.js library to store data as session cookies.
|
||||
This is done to comply with cookie privacy laws and limit the cookies used
|
||||
to manage the user experience throughout the InfluxData documentation.
|
||||
|
||||
These functions manage the following InfluxDB cookies
|
||||
This represents an API for managing user and client-side settings for the
|
||||
InfluxData documentation. It uses the local browser storage.
|
||||
|
||||
These functions manage the following InfluxDB settings:
|
||||
|
||||
- influxdata_docs_preferences: Docs UI/UX-related preferences (obj)
|
||||
- influxdata_docs_urls: User-defined InfluxDB URLs for each product (obj)
|
||||
- influxdata_docs_notifications:
|
||||
- messages: Messages (data/notifications.yaml) that have been seen (array)
|
||||
- callouts: Feature callouts that have been seen (array)
|
||||
- influxdata_docs_ported: Temporary cookie to help port old cookies to new structure
|
||||
*/
|
||||
|
||||
// Prefix for all InfluxData docs cookies
|
||||
const cookiePrefix = 'influxdata_docs_';
|
||||
// Prefix for all InfluxData docs local storage
|
||||
const storagePrefix = 'influxdata_docs_';
|
||||
|
||||
/*
|
||||
Initialize a cookie with a default value.
|
||||
Initialize data in local storage with a default value.
|
||||
*/
|
||||
initializeCookie = (cookieName, defaultValue) => {
|
||||
fullCookieName = cookiePrefix + cookieName;
|
||||
initializeLocalStorage = (storageKey, defaultValue) => {
|
||||
fullStorageKey = storagePrefix + storageKey;
|
||||
|
||||
// Check if the cookie exists before initializing the cookie
|
||||
if (Cookies.get(fullCookieName) === undefined) {
|
||||
Cookies.set(fullCookieName, defaultValue);
|
||||
// Check if the data exists before initializing the data
|
||||
if (localStorage.getItem(fullStorageKey) === null) {
|
||||
localStorage.setItem(fullStorageKey, defaultValue);
|
||||
}
|
||||
};
|
||||
|
||||
// Initialize all InfluxData docs cookies with defaults
|
||||
|
||||
/*
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////// INFLUXDATA DOCS PREFERENCES /////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
*/
|
||||
|
||||
const prefCookieName = cookiePrefix + 'preferences';
|
||||
const prefStorageKey = storagePrefix + 'preferences';
|
||||
|
||||
// Default preferences
|
||||
var defaultPref = {
|
||||
var defaultPrefObj = {
|
||||
api_lib: null,
|
||||
influxdb_url: 'cloud',
|
||||
sidebar_state: 'open',
|
||||
|
|
@ -50,35 +45,35 @@ var defaultPref = {
|
|||
};
|
||||
|
||||
/*
|
||||
Retrieve a preference from the preference cookie.
|
||||
If the cookie doesn't exist, initialize it with default values.
|
||||
Retrieve a preference from the preference key.
|
||||
If the key doesn't exist, initialize it with default values.
|
||||
*/
|
||||
getPreference = prefName => {
|
||||
// Initialize the preference cookie if it doesn't already exist
|
||||
if (Cookies.get(prefCookieName) === undefined) {
|
||||
initializeCookie('preferences', defaultPref);
|
||||
// Initialize preference data if it doesn't already exist
|
||||
if (localStorage.getItem(prefStorageKey) === null) {
|
||||
initializeLocalStorage('preferences', JSON.stringify(defaultPrefObj));
|
||||
}
|
||||
|
||||
// Retrieve and parse the cookie as JSON
|
||||
prefString = Cookies.get(prefCookieName);
|
||||
// Retrieve and parse preferences as JSON
|
||||
prefString = localStorage.getItem(prefStorageKey);
|
||||
prefObj = JSON.parse(prefString);
|
||||
|
||||
// Return the value of the specified preference
|
||||
return prefObj[prefName];
|
||||
};
|
||||
|
||||
// Set a preference in the preferences cookie
|
||||
// Set a preference in the preferences key
|
||||
setPreference = (prefID, prefValue) => {
|
||||
var prefString = Cookies.get(prefCookieName);
|
||||
var prefString = localStorage.getItem(prefStorageKey);
|
||||
let prefObj = JSON.parse(prefString);
|
||||
|
||||
prefObj[prefID] = prefValue;
|
||||
|
||||
Cookies.set(prefCookieName, prefObj);
|
||||
localStorage.setItem(prefStorageKey, JSON.stringify(prefObj));
|
||||
};
|
||||
|
||||
// Return an object containing all preferences
|
||||
getPreferences = () => JSON.parse(Cookies.get(prefCookieName));
|
||||
getPreferences = () => JSON.parse(localStorage.getItem(prefStorageKey));
|
||||
|
||||
/*
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
|
@ -86,7 +81,7 @@ getPreferences = () => JSON.parse(Cookies.get(prefCookieName));
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
*/
|
||||
|
||||
const urlCookieName = cookiePrefix + 'urls';
|
||||
const urlStorageKey = storagePrefix + 'urls';
|
||||
|
||||
// Default URLs per product
|
||||
var defaultUrls = {
|
||||
|
|
@ -97,8 +92,8 @@ var defaultUrls = {
|
|||
clustered: 'cluster-host.com',
|
||||
};
|
||||
|
||||
// Defines the default urls cookie value
|
||||
var defaultUrlsCookie = {
|
||||
// Defines the default urls value
|
||||
var defaultUrlsObj = {
|
||||
oss: defaultUrls.oss,
|
||||
cloud: defaultUrls.cloud,
|
||||
serverless: defaultUrls.serverless,
|
||||
|
|
@ -112,25 +107,25 @@ var defaultUrlsCookie = {
|
|||
custom: '',
|
||||
};
|
||||
|
||||
// Return an object that contains all InfluxDB urls stored in the urls cookie
|
||||
// Return an object that contains all InfluxDB urls stored in the urls key
|
||||
getInfluxDBUrls = () => {
|
||||
// Initialize the urls cookie if it doesn't already exist
|
||||
if (Cookies.get(urlCookieName) === undefined) {
|
||||
initializeCookie('urls', defaultUrlsCookie);
|
||||
// Initialize urls data if it doesn't already exist
|
||||
if (localStorage.getItem(urlStorageKey) === null) {
|
||||
initializeLocalStorage('urls', JSON.stringify(defaultUrlsObj));
|
||||
}
|
||||
|
||||
return JSON.parse(Cookies.get(urlCookieName));
|
||||
return JSON.parse(localStorage.getItem(urlStorageKey));
|
||||
};
|
||||
|
||||
// Get the current or previous URL for a specific product or a custom url
|
||||
getInfluxDBUrl = product => {
|
||||
// Initialize the urls cookie if it doesn't already exist
|
||||
if (Cookies.get(urlCookieName) === undefined) {
|
||||
initializeCookie('urls', defaultUrlsCookie);
|
||||
// Initialize urls data if it doesn't already exist
|
||||
if (localStorage.getItem(urlStorageKey) === null) {
|
||||
initializeLocalStorage('urls', JSON.stringify(defaultUrlsObj));
|
||||
}
|
||||
|
||||
// Retrieve and parse the cookie as JSON
|
||||
urlsString = Cookies.get(urlCookieName);
|
||||
// Retrieve and parse the URLs as JSON
|
||||
urlsString = localStorage.getItem(urlStorageKey);
|
||||
urlsObj = JSON.parse(urlsString);
|
||||
|
||||
// Return the URL of the specified product
|
||||
|
|
@ -138,27 +133,27 @@ getInfluxDBUrl = product => {
|
|||
};
|
||||
|
||||
/*
|
||||
Set multiple product URLs in the urls cookie.
|
||||
Set multiple product URLs in the urls key.
|
||||
Input should be an object where the key is the product and the value is the
|
||||
URL to set for that product.
|
||||
*/
|
||||
setInfluxDBUrls = updatedUrlsObj => {
|
||||
var urlsString = Cookies.get(urlCookieName);
|
||||
var urlsString = localStorage.getItem(urlStorageKey);
|
||||
let urlsObj = JSON.parse(urlsString);
|
||||
|
||||
newUrlsObj = { ...urlsObj, ...updatedUrlsObj };
|
||||
|
||||
Cookies.set(urlCookieName, newUrlsObj);
|
||||
localStorage.setItem(urlStorageKey, JSON.stringify(newUrlsObj));
|
||||
};
|
||||
|
||||
// Set an InfluxDB URL to an empty string in the urls cookie
|
||||
// Set an InfluxDB URL to an empty string in the urls key
|
||||
removeInfluxDBUrl = product => {
|
||||
var urlsString = Cookies.get(urlCookieName);
|
||||
var urlsString = localStorage.getItem(urlStorageKey);
|
||||
let urlsObj = JSON.parse(urlsString);
|
||||
|
||||
urlsObj[product] = '';
|
||||
|
||||
Cookies.set(urlCookieName, urlsObj);
|
||||
localStorage.setItem(urlStorageKey, JSON.stringify(urlsObj));
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -167,25 +162,25 @@ removeInfluxDBUrl = product => {
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
*/
|
||||
|
||||
const notificationCookieName = cookiePrefix + 'notifications';
|
||||
const notificationStorageKey = storagePrefix + 'notifications';
|
||||
|
||||
// Default notifications
|
||||
var defaultNotifications = {
|
||||
var defaultNotificationsObj = {
|
||||
messages: [],
|
||||
callouts: [],
|
||||
};
|
||||
|
||||
getNotifications = () => {
|
||||
// Initialize the notifications cookie if it doesn't already exist
|
||||
if (Cookies.get(notificationCookieName) === undefined) {
|
||||
initializeCookie('notifications', defaultNotifications);
|
||||
// Initialize notifications data if it doesn't already exist
|
||||
if (localStorage.getItem(notificationStorageKey) === null) {
|
||||
initializeLocalStorage('notifications', JSON.stringify(defaultNotificationsObj));
|
||||
}
|
||||
|
||||
// Retrieve and parse the cookie as JSON
|
||||
notificationString = Cookies.get(notificationCookieName);
|
||||
// Retrieve and parse the notifications data as JSON
|
||||
notificationString = localStorage.getItem(notificationStorageKey);
|
||||
notificationObj = JSON.parse(notificationString);
|
||||
|
||||
// Return the value of the specified preference
|
||||
// Return the notifications object
|
||||
return notificationObj;
|
||||
};
|
||||
|
||||
|
|
@ -222,5 +217,5 @@ setNotificationAsRead = (notificationID, notificationType) => {
|
|||
readNotifications.push(notificationID);
|
||||
notificationsObj[notificationType + 's'] = readNotifications;
|
||||
|
||||
Cookies.set(notificationCookieName, notificationsObj);
|
||||
localStorage.setItem(notificationStorageKey, JSON.stringify(notificationsObj));
|
||||
};
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
/////////////////////////// Table of Contents Script ///////////////////////////
|
||||
|
||||
/*
|
||||
* This script is used to generate a table of contents for the
|
||||
* release notes pages.
|
||||
*/
|
||||
|
||||
// Use jQuery filter to get an array of all the *release* h2 elements
|
||||
const releases = $('h2').filter(
|
||||
(_i, el) => !el.id.match(/checkpoint-releases/)
|
||||
);
|
||||
|
||||
// Extract data about each release from the array of releases
|
||||
releaseData = releases.map((_i, el) => ({
|
||||
name: el.textContent,
|
||||
id: el.id,
|
||||
class: el.getAttribute('class'),
|
||||
date: el.getAttribute('date')
|
||||
}));
|
||||
|
||||
// Use release data to generate a list item for each release
|
||||
getReleaseItem = (releaseData) => {
|
||||
var li = document.createElement("li");
|
||||
if (releaseData.class !== null) {
|
||||
li.className = releaseData.class;
|
||||
}
|
||||
li.innerHTML = `<a href="#${releaseData.id}">${releaseData.name}</a>`;
|
||||
li.setAttribute('date', releaseData.date);
|
||||
return li;
|
||||
}
|
||||
|
||||
// Use jQuery each to build the release table of contents
|
||||
releaseData.each((_i, release) => {
|
||||
$('#release-toc ul')[0].appendChild(getReleaseItem(release));
|
||||
});
|
||||
|
||||
/*
|
||||
* This script is used to expand the release notes table of contents by the
|
||||
* number specified in the `show` attribute of `ul.release-list`.
|
||||
* Once all the release items are visible, the "Show More" button is hidden.
|
||||
*/
|
||||
$('#release-toc .show-more').click(function () {
|
||||
const itemHeight = 1.885; // Item height in rem
|
||||
const releaseNum = releaseData.length;
|
||||
const maxHeight = releaseNum * itemHeight;
|
||||
const releaseIncrement = Number($('#release-list')[0].getAttribute('show'));
|
||||
const currentHeight = Number(
|
||||
$('#release-list')[0].style.height.match(/\d+\.?\d+/)[0]
|
||||
);
|
||||
const potentialHeight = currentHeight + releaseIncrement * itemHeight;
|
||||
const newHeight = potentialHeight > maxHeight ? maxHeight : potentialHeight;
|
||||
|
||||
$('#release-list')[0].style.height = `${newHeight}rem`;
|
||||
|
||||
if (newHeight >= maxHeight) {
|
||||
$('#release-toc .show-more').fadeOut(100);
|
||||
}
|
||||
});
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
// Store the host value for the current page
|
||||
currentPageHost = window.location.href.match(/^(?:[^\/]*\/){2}[^\/]+/g)[0];
|
||||
const currentPageHost = window.location.href.match(/^(?:[^\/]*\/){2}[^\/]+/g)[0];
|
||||
|
||||
// Define v3-wayfinding elements
|
||||
var wayfindingModal = document.getElementById('v3-wayfinding-modal');
|
||||
|
|
@ -55,6 +55,9 @@ function slideUp (elem) {
|
|||
* - Has the user opted out of the wayfinding modal?
|
||||
*/
|
||||
function shouldOpenWayfinding () {
|
||||
// Extract the protocol and hostname of referrer
|
||||
const referrerMatch = document.referrer.match(/^(?:[^\/]*\/){2}[^\/]+/g);
|
||||
const referrerHost = referrerMatch ? referrerMatch[0] : '';
|
||||
var isExternalReferrer = !referrerWhitelist.includes(referrerHost);
|
||||
var wayfindingOptedOut = getPreference(wayfindingPrefCookie);
|
||||
|
||||
|
|
|
|||
|
|
@ -56,6 +56,16 @@
|
|||
color: $r-dreamsicle;
|
||||
}
|
||||
|
||||
&.checkpoint::before {
|
||||
content: '\e93b';
|
||||
font-family: 'icomoon-v4';
|
||||
font-size: 2.25rem;
|
||||
color: $br-new-magenta;
|
||||
display: inline;
|
||||
margin: 0 .5rem 0 0;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
&[metadata]::after {
|
||||
content: attr(metadata);
|
||||
margin-left: .65rem;
|
||||
|
|
@ -185,9 +195,7 @@
|
|||
"article/children",
|
||||
"article/code",
|
||||
"article/columns",
|
||||
"article/cloud",
|
||||
"article/diagrams",
|
||||
"article/enterprise",
|
||||
"article/expand",
|
||||
"article/feedback",
|
||||
"article/flex",
|
||||
|
|
@ -198,11 +206,10 @@
|
|||
"article/keybinding",
|
||||
"article/list-filters",
|
||||
"article/lists",
|
||||
"article/note",
|
||||
"article/opacity",
|
||||
"article/pagination-btns",
|
||||
"article/product-tags",
|
||||
"article/related",
|
||||
"article/release-toc",
|
||||
"article/scrollbars",
|
||||
"article/svgs",
|
||||
"article/tabbed-content",
|
||||
|
|
@ -211,8 +218,7 @@
|
|||
"article/telegraf-plugins",
|
||||
"article/title",
|
||||
"article/truncate",
|
||||
"article/video",
|
||||
"article/warn";
|
||||
"article/video";
|
||||
|
||||
|
||||
//////////////////////////////// Miscellaneous ///////////////////////////////
|
||||
|
|
@ -307,6 +313,10 @@
|
|||
&.magenta {
|
||||
color: $p-comet;
|
||||
}
|
||||
|
||||
&.pink {
|
||||
color: $br-new-magenta;
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////// Getting Started Buttons //////////////////////////
|
||||
|
|
|
|||
|
|
@ -139,6 +139,12 @@
|
|||
code { background: transparent !important; }
|
||||
}
|
||||
|
||||
hr {
|
||||
border-width: 1px 0 0 0;
|
||||
border-style: dotted;
|
||||
border-color: rgba($g20-white, .5);
|
||||
}
|
||||
|
||||
&.min {
|
||||
.notification-title {h3 {font-size: 1.15rem;}}
|
||||
.notification-content {
|
||||
|
|
|
|||
|
|
@ -183,78 +183,3 @@ pre[class*="language-"] {
|
|||
.mi + .n { color: $article-code-accent5; }
|
||||
}
|
||||
}
|
||||
|
||||
.note {
|
||||
.highlight {
|
||||
color: $article-note-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-note-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-note-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-note-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-note-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-note-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-note-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-note-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-note-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-note-code-accent7 }
|
||||
}
|
||||
}
|
||||
|
||||
.warn {
|
||||
.highlight {
|
||||
color: $article-warn-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-warn-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-warn-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-warn-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-warn-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-warn-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-warn-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-warn-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-warn-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-warn-code-accent7 }
|
||||
}
|
||||
}
|
||||
|
||||
.cloud-msg {
|
||||
.highlight {
|
||||
color: $article-cloud-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-cloud-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-cloud-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-cloud-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-cloud-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-cloud-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-cloud-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-cloud-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-cloud-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-cloud-code-accent7 }
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
////////////////// Blockquotes, Notes, Warnings, & Messages //////////////////
|
||||
///////////////////////// Blockquotes and Alert Blocks /////////////////////////
|
||||
|
||||
blockquote,
|
||||
.block, {
|
||||
|
|
@ -20,8 +20,40 @@ blockquote,
|
|||
& > h6 {
|
||||
&:first-child {margin-top: 0; padding-top: 0.25em;}
|
||||
}
|
||||
|
||||
&.note,
|
||||
&.tip,
|
||||
&.important,
|
||||
&.warning, &.warn,
|
||||
&.caution {
|
||||
position: relative;
|
||||
&:before {
|
||||
position: absolute;
|
||||
font-family: "alert-icons";
|
||||
font-size: 1em;
|
||||
line-height: 1em;
|
||||
padding: .3rem;
|
||||
text-align: center;
|
||||
top: -.5rem;
|
||||
left: -.95rem;
|
||||
display: block;
|
||||
border-radius: 50%;
|
||||
box-shadow: 1px 3px 5px rgba($article-shadow, .5);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h2, h3, h4, h5, h6 {
|
||||
& + .note,
|
||||
& + .tip,
|
||||
& + .important,
|
||||
& + .warning,
|
||||
& + .caution {
|
||||
margin-top: 1.25rem;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
blockquote {
|
||||
padding: 1rem 1rem 1rem 1.25rem;
|
||||
border-color: rgba($article-text, .25);
|
||||
|
|
@ -59,3 +91,9 @@ blockquote {
|
|||
margin: .5rem 0 1rem;
|
||||
}
|
||||
}
|
||||
|
||||
@import "blocks/note",
|
||||
"blocks/tip",
|
||||
"blocks/important",
|
||||
"blocks/warning",
|
||||
"blocks/caution";
|
||||
|
|
|
|||
|
|
@ -48,6 +48,14 @@ a.btn {
|
|||
font-size: 1.1rem;
|
||||
}
|
||||
|
||||
&.arrow span.CaretOutlineRight {
|
||||
font-size: 1.5rem;
|
||||
line-height: 0;
|
||||
vertical-align: sub;
|
||||
display: inline-block;
|
||||
margin-right: -.65rem;
|
||||
}
|
||||
|
||||
&.small-plus {
|
||||
padding: .25em;
|
||||
line-height: .65rem;
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@
|
|||
code { font-size: .9rem; }
|
||||
}
|
||||
|
||||
p, li {
|
||||
p, li, ol, ul {
|
||||
& + .caption {
|
||||
padding: 0;
|
||||
margin: -.75rem 0 0;
|
||||
|
|
|
|||
|
|
@ -1,69 +0,0 @@
|
|||
.cloud {
|
||||
border-color: $article-cloud-base;
|
||||
background: rgba($article-cloud-base, .12);
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-cloud-heading;
|
||||
}
|
||||
p,li {
|
||||
color: $article-cloud-text;
|
||||
}
|
||||
strong {
|
||||
color: inherit;
|
||||
}
|
||||
a {
|
||||
color: $article-cloud-link;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-cloud-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-cloud-link-hover;
|
||||
code:after {
|
||||
border-color: transparent $article-cloud-link-hover transparent transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-cloud-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-cloud-code;
|
||||
background: $article-cloud-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-cloud-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-cloud-text;
|
||||
box-shadow: 1px 3px 10px $article-cloud-shadow;
|
||||
thead{
|
||||
@include gradient($article-cloud-table-header);
|
||||
}
|
||||
tr:nth-child(even) td {
|
||||
background: $article-cloud-table-row-alt;
|
||||
}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-cloud-text, .25);
|
||||
p { color: rgba($article-cloud-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-cloud-text, .5);
|
||||
&:hover { color: rgba($article-cloud-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-cloud-text, 1);
|
||||
background: $article-cloud-code-bg};
|
||||
}
|
||||
|
||||
&.flex {
|
||||
font-style: italic;
|
||||
display: flex;
|
||||
div:first-child {
|
||||
position: relative;
|
||||
margin: .25rem 2rem 0 0;
|
||||
}
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-cloud-base, .4)
|
||||
}
|
||||
}
|
||||
|
|
@ -84,6 +84,7 @@ span.code-callout, .code-placeholder {
|
|||
&.green {color: $article-code-accent3;}
|
||||
&.magenta {color: $br-new-magenta;}
|
||||
&.orange {color: $r-curacao;}
|
||||
&.delete, &.strike {text-decoration: line-through;}
|
||||
}
|
||||
|
||||
/////////////// Styles for code placeholders that can be updated ///////////////
|
||||
|
|
@ -157,7 +158,7 @@ span.code-callout, .code-placeholder {
|
|||
}
|
||||
}
|
||||
}
|
||||
.code-placeholder-key code {color: $code-placeholder}
|
||||
.code-placeholder-key code {color: $code-placeholder !important}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -1,19 +0,0 @@
|
|||
.enterprise {
|
||||
border-color: $article-enterprise-base;
|
||||
background: rgba($article-enterprise-base, .15);
|
||||
p,li {
|
||||
color: $article-enterprise-text;
|
||||
}
|
||||
a {
|
||||
color: $article-enterprise-link;
|
||||
&:hover { color: $article-enterprise-link-hover; }
|
||||
}
|
||||
&.flex {
|
||||
font-style: italic;
|
||||
display: flex;
|
||||
div:first-child {
|
||||
position: relative;
|
||||
margin: .25rem 2rem 0 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
.note {
|
||||
border-color: $article-note-base;
|
||||
background: rgba($article-note-base, .1);
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-note-heading;
|
||||
}
|
||||
p, li {
|
||||
color: $article-note-text;
|
||||
}
|
||||
strong {
|
||||
color: $article-note-strong;
|
||||
}
|
||||
a {
|
||||
color: $article-note-link;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-note-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-note-link-hover;
|
||||
code:after {
|
||||
border-color: transparent $article-note-link-hover transparent transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-note-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-note-code;
|
||||
background: $article-note-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-note-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-note-text;
|
||||
box-shadow: 1px 3px 10px $article-note-shadow;
|
||||
thead{
|
||||
@include gradient($article-note-table-header);
|
||||
}
|
||||
tr:nth-child(even) td {
|
||||
background: $article-note-table-row-alt;
|
||||
}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-note-text, .25);
|
||||
p { color: rgba($article-note-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-note-text, .5);
|
||||
&:hover { color: rgba($article-note-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-note-text, 1);
|
||||
background: $article-note-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-note-base, .4)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
.product-tags {
|
||||
margin: -2rem 0 2.5rem;
|
||||
span {
|
||||
margin-right: .25rem;
|
||||
padding: .2rem .65rem .25rem;
|
||||
border-radius: 1rem;
|
||||
font-weight: $medium;
|
||||
font-style: italic;
|
||||
font-size: .85rem;
|
||||
border: 1px solid;
|
||||
|
||||
&.oss {
|
||||
color: $product-oss;
|
||||
border-color: rgba($product-oss, .35);
|
||||
background: rgba($product-oss, .1);
|
||||
}
|
||||
&.cloud {
|
||||
color: $product-cloud;
|
||||
border-color: rgba($product-cloud, .35);
|
||||
background: rgba($product-cloud, .1);
|
||||
}
|
||||
&.enterprise {
|
||||
color: $product-enterprise;
|
||||
border-color: rgba($product-enterprise, .35);
|
||||
background: rgba($product-enterprise, .1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
#release-toc {
|
||||
margin: 2rem 0 3rem;
|
||||
|
||||
ul {
|
||||
list-style: none;
|
||||
overflow: hidden;
|
||||
padding-left: 0;
|
||||
margin-bottom: .75rem;
|
||||
transition: height .2s;
|
||||
|
||||
li {
|
||||
line-height: 1.2em;
|
||||
margin: .5rem 0;
|
||||
|
||||
&::after {
|
||||
content: attr(date);
|
||||
font-size: 1rem;
|
||||
margin-left: .5rem;
|
||||
color: rgba($article-text, .5);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&.clustered {
|
||||
ul {
|
||||
padding-left: 1.5rem;
|
||||
|
||||
.checkpoint {
|
||||
margin-left: -1.5rem;
|
||||
|
||||
&::before {
|
||||
content: '\e93b' !important;
|
||||
font-family: 'icomoon-v4';
|
||||
color: $br-new-magenta;
|
||||
display: inline-block;
|
||||
margin: 0 .5rem 0 0;
|
||||
vertical-align: top;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.show-more {
|
||||
color: $article-link;
|
||||
transition: color .2s;
|
||||
font-weight: $medium;
|
||||
font-size: 1rem;
|
||||
|
||||
&::before {
|
||||
content: '\e935';
|
||||
font-family: 'icomoon-v4';
|
||||
font-size: .9rem;
|
||||
color: $article-bg;
|
||||
background: $article-link;
|
||||
border-radius: 50%;
|
||||
margin-right: .5rem;
|
||||
transition: background .15s;
|
||||
}
|
||||
|
||||
&:hover {
|
||||
cursor: pointer;
|
||||
color: $article-link-hover;
|
||||
&::before {
|
||||
background: $article-link-hover;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3,15 +3,3 @@
|
|||
pre { @include scrollbar($article-code-bg, $article-code-scrollbar); }
|
||||
table { @include scrollbar($article-table-row-alt, $article-table-scrollbar);}
|
||||
|
||||
.note {
|
||||
pre { @include scrollbar($article-note-code-bg, $article-note-code-scrollbar); }
|
||||
table { @include scrollbar($article-note-table-row-alt, $article-note-table-scrollbar); }
|
||||
}
|
||||
.warn {
|
||||
pre { @include scrollbar($article-warn-code-bg, $article-warn-code-scrollbar); }
|
||||
table { @include scrollbar($article-warn-table-row-alt, $article-warn-table-scrollbar); }
|
||||
}
|
||||
.cloud {
|
||||
pre { @include scrollbar($article-cloud-code-bg, $article-cloud-code-scrollbar); }
|
||||
table { @include scrollbar($article-cloud-table-row-alt, $article-cloud-table-scrollbar); }
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,12 +10,13 @@
|
|||
|
||||
li {
|
||||
margin-bottom: 0;
|
||||
padding: 0 .65em 0 .75em;
|
||||
padding: .45em .75em;
|
||||
color: $article-heading;
|
||||
background: rgba($article-heading, .07);
|
||||
font-size: .95rem;
|
||||
font-weight: $medium;
|
||||
border-radius: 1em;
|
||||
line-height: 1.1rem;
|
||||
border-radius: 1.1em;
|
||||
display: inline-block;
|
||||
|
||||
&.updated-in,
|
||||
|
|
|
|||
|
|
@ -1,60 +0,0 @@
|
|||
.warn {
|
||||
border-color: $article-warn-base;
|
||||
background: $article-warn-bg;
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-warn-heading;
|
||||
}
|
||||
p, li {
|
||||
color: $article-warn-text;
|
||||
}
|
||||
strong {
|
||||
color: inherit;
|
||||
}
|
||||
a {
|
||||
color: $article-warn-link !important;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-warn-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-warn-link-hover !important;
|
||||
code:after {
|
||||
border-color: transparent $article-warn-link-hover transparent transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-warn-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-warn-code;
|
||||
background: $article-warn-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-warn-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-warn-text;
|
||||
box-shadow: 1px 3px 10px $article-warn-shadow;
|
||||
thead{
|
||||
@include gradient($article-warn-table-header);
|
||||
}
|
||||
tr:nth-child(even) td {
|
||||
background: $article-warn-table-row-alt;
|
||||
}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-warn-text, .25);
|
||||
p { color: rgba($article-warn-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-warn-text, .5);
|
||||
&:hover { color: rgba($article-warn-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-warn-text, 1);
|
||||
background: $article-warn-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-warn-base, .4)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
|
||||
.caution {
|
||||
&:before {
|
||||
content: "C";
|
||||
color: #fff;
|
||||
background: $article-caution-base;
|
||||
}
|
||||
|
||||
border-color: $article-caution-base;
|
||||
background: $article-caution-bg;
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-caution-heading;
|
||||
a {
|
||||
color: inherit !important;
|
||||
&:hover{color: inherit !important}
|
||||
}
|
||||
}
|
||||
p, li {color: $article-caution-text;}
|
||||
strong { color: $article-caution-strong; }
|
||||
a {
|
||||
color: $article-caution-link !important;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-caution-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-caution-link-hover !important;
|
||||
code:after { border-color: transparent $article-caution-link-hover transparent transparent;}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-caution-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-caution-code;
|
||||
background: $article-caution-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-caution-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-caution-text;
|
||||
box-shadow: 1px 3px 10px $article-caution-shadow;
|
||||
thead{@include gradient($article-caution-table-header);}
|
||||
tr:nth-child(even) td {background: $article-caution-table-row-alt;}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-caution-text, .25);
|
||||
p { color: rgba($article-caution-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-caution-text, .5);
|
||||
&:hover { color: rgba($article-caution-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-caution-text, 1);
|
||||
background: $article-caution-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-caution-base, .4) !important
|
||||
}
|
||||
|
||||
///////////////////////////////// Scrollbars /////////////////////////////////
|
||||
pre { @include scrollbar($article-caution-code-bg, $article-caution-code-scrollbar); }
|
||||
table { @include scrollbar($article-caution-table-row-alt, $article-caution-table-scrollbar); }
|
||||
|
||||
///////////////////////////// Syntax Highlighting ////////////////////////////
|
||||
.highlight {
|
||||
color: $article-caution-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-caution-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-caution-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-caution-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-caution-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-caution-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-caution-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-caution-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-caution-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-caution-code-accent7 }
|
||||
// Javascript / Flux specific styles (duration and time values)
|
||||
.language-js {
|
||||
.mi + .nx, .mf + .nx { color: $article-caution-code-accent5; }
|
||||
}
|
||||
// SQL / InfluxQL specific styles (duration values)
|
||||
.language-sql {
|
||||
.mi + .n { color: $article-caution-code-accent5; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
.important {
|
||||
|
||||
}
|
||||
|
||||
.important {
|
||||
&:before {
|
||||
content: "I";
|
||||
color: #fff;
|
||||
background: $article-important-base;
|
||||
}
|
||||
|
||||
border-color: $article-important-base;
|
||||
background: rgba($article-important-base, .15);
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-important-heading;
|
||||
a {
|
||||
color: inherit !important;
|
||||
&:hover{color: inherit !important}
|
||||
}
|
||||
}
|
||||
p, li {
|
||||
color: $article-important-text;
|
||||
}
|
||||
strong {
|
||||
color: $article-important-strong;
|
||||
}
|
||||
a {
|
||||
color: $article-important-link;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-important-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-important-link-hover;
|
||||
code:after {
|
||||
border-color: transparent $article-important-link-hover transparent transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-important-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-important-code;
|
||||
background: $article-important-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-important-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-important-text;
|
||||
box-shadow: 1px 3px 10px $article-important-shadow;
|
||||
thead{
|
||||
@include gradient($article-important-table-header);
|
||||
}
|
||||
tr:nth-child(even) td {
|
||||
background: $article-important-table-row-alt;
|
||||
}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-important-text, .25);
|
||||
p { color: rgba($article-important-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-important-text, .5);
|
||||
&:hover { color: rgba($article-important-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-important-text, 1);
|
||||
background: $article-important-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-important-base, .4) !important;
|
||||
}
|
||||
|
||||
///////////////////////////////// Scrollbars /////////////////////////////////
|
||||
pre { @include scrollbar($article-important-code-bg, $article-important-code-scrollbar); }
|
||||
table { @include scrollbar($article-important-table-row-alt, $article-important-table-scrollbar); }
|
||||
|
||||
///////////////////////////// Syntax Highlighting ////////////////////////////
|
||||
.highlight {
|
||||
color: $article-important-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-important-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-important-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-important-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-important-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-important-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-important-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-important-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-important-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-important-code-accent7 }
|
||||
// Javascript / Flux specific styles (duration and time values)
|
||||
.language-js {
|
||||
.mi + .nx, .mf + .nx { color: $article-important-code-accent5; }
|
||||
}
|
||||
// SQL / InfluxQL specific styles (duration values)
|
||||
.language-sql {
|
||||
.mi + .n { color: $article-important-code-accent5; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,108 @@
|
|||
.note {
|
||||
&:before {
|
||||
content: "N";
|
||||
color: #fff;
|
||||
background: $article-note-base;
|
||||
}
|
||||
}
|
||||
|
||||
.note {
|
||||
border-color: $article-note-base;
|
||||
background: rgba($article-note-base, .1);
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-note-heading;
|
||||
a {
|
||||
color: inherit !important;
|
||||
&:hover{color: inherit !important}
|
||||
}
|
||||
}
|
||||
p, li {
|
||||
color: $article-note-text;
|
||||
}
|
||||
strong {
|
||||
color: $article-note-strong;
|
||||
}
|
||||
a {
|
||||
color: $article-note-link;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-note-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-note-link-hover;
|
||||
code:after {
|
||||
border-color: transparent $article-note-link-hover transparent transparent;
|
||||
}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-note-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-note-code;
|
||||
background: $article-note-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-note-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-note-text;
|
||||
box-shadow: 1px 3px 10px $article-note-shadow;
|
||||
thead{
|
||||
@include gradient($article-note-table-header);
|
||||
}
|
||||
tr:nth-child(even) td {
|
||||
background: $article-note-table-row-alt;
|
||||
}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-note-text, .25);
|
||||
p { color: rgba($article-note-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-note-text, .5);
|
||||
&:hover { color: rgba($article-note-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-note-text, 1);
|
||||
background: $article-note-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-note-base, .4) !important;
|
||||
}
|
||||
|
||||
///////////////////////////////// Scrollbars /////////////////////////////////
|
||||
pre { @include scrollbar($article-note-code-bg, $article-note-code-scrollbar); }
|
||||
table { @include scrollbar($article-note-table-row-alt, $article-note-table-scrollbar); }
|
||||
|
||||
///////////////////////////// Syntax Highlighting ////////////////////////////
|
||||
.highlight {
|
||||
color: $article-note-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-note-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-note-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-note-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-note-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-note-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-note-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-note-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-note-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-note-code-accent7 }
|
||||
// Javascript / Flux specific styles (duration and time values)
|
||||
.language-js {
|
||||
.mi + .nx, .mf + .nx { color: $article-note-code-accent5; }
|
||||
}
|
||||
// SQL / InfluxQL specific styles (duration values)
|
||||
.language-sql {
|
||||
.mi + .n { color: $article-note-code-accent5; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
.tip {
|
||||
&:before {
|
||||
content: "T";
|
||||
color: #fff;
|
||||
background: $article-tip-base;
|
||||
}
|
||||
|
||||
border-color: $article-tip-base;
|
||||
background: rgba($article-tip-base, .12);
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-tip-heading;
|
||||
a {
|
||||
color: inherit !important;
|
||||
&:hover{color: inherit !important}
|
||||
}
|
||||
}
|
||||
p,li { color: $article-tip-text; }
|
||||
strong {color: $article-tip-strong;}
|
||||
a {
|
||||
color: $article-tip-link;
|
||||
code:after {border-color: transparent rgba($article-tip-code, .35) transparent transparent;}
|
||||
&:hover {
|
||||
color: $article-tip-link-hover;
|
||||
code:after {border-color: transparent $article-tip-link-hover transparent transparent;}
|
||||
}
|
||||
}
|
||||
ol li:before {color: $article-tip-text;}
|
||||
code, pre{
|
||||
color: $article-tip-code;
|
||||
background: $article-tip-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-tip-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-tip-text;
|
||||
box-shadow: 1px 3px 10px $article-tip-shadow;
|
||||
thead{@include gradient($article-tip-table-header);}
|
||||
tr:nth-child(even) td {background: $article-tip-table-row-alt;}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-tip-text, .25);
|
||||
p { color: rgba($article-tip-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-tip-text, .5);
|
||||
&:hover { color: rgba($article-tip-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-tip-text, 1);
|
||||
background: $article-tip-code-bg};
|
||||
}
|
||||
|
||||
&.flex {
|
||||
font-style: italic;
|
||||
display: flex;
|
||||
div:first-child {
|
||||
position: relative;
|
||||
margin: .25rem 2rem 0 0;
|
||||
}
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-tip-base, .4) !important;
|
||||
}
|
||||
|
||||
///////////////////////////////// Scrollbars /////////////////////////////////
|
||||
pre { @include scrollbar($article-tip-code-bg, $article-tip-code-scrollbar); }
|
||||
table { @include scrollbar($article-tip-table-row-alt, $article-tip-table-scrollbar); }
|
||||
|
||||
///////////////////////////// Syntax Highlighting ////////////////////////////
|
||||
.highlight {
|
||||
color: $article-tip-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-tip-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-tip-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-tip-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-tip-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-tip-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-tip-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-tip-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-tip-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-tip-code-accent7 }
|
||||
// Javascript / Flux specific styles (duration and time values)
|
||||
.language-js {
|
||||
.mi + .nx, .mf + .nx { color: $article-tip-code-accent5; }
|
||||
}
|
||||
// SQL / InfluxQL specific styles (duration values)
|
||||
.language-sql {
|
||||
.mi + .n { color: $article-tip-code-accent5; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
.warn, .warning {
|
||||
&:before {
|
||||
content: "W";
|
||||
color: #fff;
|
||||
background: $article-warning-base;
|
||||
}
|
||||
|
||||
border-color: $article-warning-base;
|
||||
background: $article-warning-bg;
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: $article-warning-heading;
|
||||
a {
|
||||
color: inherit !important;
|
||||
&:hover{color: inherit !important}
|
||||
}
|
||||
}
|
||||
p, li {color: $article-warning-text;}
|
||||
strong { color: $article-warning-strong; }
|
||||
a {
|
||||
color: $article-warning-link !important;
|
||||
code:after {
|
||||
border-color: transparent rgba($article-warning-code, .35) transparent transparent;
|
||||
}
|
||||
&:hover {
|
||||
color: $article-warning-link-hover !important;
|
||||
code:after { border-color: transparent $article-warning-link-hover transparent transparent;}
|
||||
}
|
||||
}
|
||||
ol li:before {
|
||||
color: $article-warning-text;
|
||||
}
|
||||
code, pre{
|
||||
color: $article-warning-code;
|
||||
background: $article-warning-code-bg;
|
||||
}
|
||||
img {
|
||||
box-shadow: 1px 3px 10px $article-warning-shadow;
|
||||
}
|
||||
table{
|
||||
color: $article-warning-text;
|
||||
box-shadow: 1px 3px 10px $article-warning-shadow;
|
||||
thead{@include gradient($article-warning-table-header);}
|
||||
tr:nth-child(even) td {background: $article-warning-table-row-alt;}
|
||||
}
|
||||
blockquote {
|
||||
border-color: rgba($article-warning-text, .25);
|
||||
p { color: rgba($article-warning-text, .6); }
|
||||
}
|
||||
.code-tabs-wrapper .code-tabs a {
|
||||
background: transparent;
|
||||
color: rgba($article-warning-text, .5);
|
||||
&:hover { color: rgba($article-warning-text, 1); }
|
||||
&.is-active {
|
||||
color: rgba($article-warning-text, 1);
|
||||
background: $article-warning-code-bg};
|
||||
}
|
||||
hr, .expand {
|
||||
border-color: rgba($article-warning-base, .4) !important;
|
||||
}
|
||||
|
||||
///////////////////////////////// Scrollbars /////////////////////////////////
|
||||
pre { @include scrollbar($article-warning-code-bg, $article-warning-code-scrollbar); }
|
||||
table { @include scrollbar($article-warning-table-row-alt, $article-warning-table-scrollbar); }
|
||||
|
||||
///////////////////////////// Syntax Highlighting ////////////////////////////
|
||||
.highlight {
|
||||
color: $article-warning-code;
|
||||
|
||||
.gh,.go,.na,.nt,.nv,.ow
|
||||
{ color: $article-warning-code }
|
||||
.c,.ch,.cm,.cpf,.c1, .cs,.w
|
||||
{ color: $article-warning-code-accent1; }
|
||||
.gi
|
||||
{ background-color: $article-warning-code-accent1; }
|
||||
.k,.kc,.kd,.kn,.kp,.kr,.nn
|
||||
{ color: $article-warning-code-accent2; }
|
||||
.bp,.cp,.dl,.gt,.gu,.kt,.nb,.nc,.no,.sa,.sb,.sc,.sd,.se,.sh,.sx,.sr,.s1,.s2
|
||||
{ color: $article-warning-code-accent3 }
|
||||
.err,.fm,.gr,.gd,.nd,.ne,.nf,.nl,.si
|
||||
{ color: $article-warning-code-accent4 }
|
||||
.m,.ni,.mb,.mf,.mh,.mi,.mo,.vc,.vg,.vi,.vm,.il
|
||||
{ color: $article-warning-code-accent5 }
|
||||
.gp,.o
|
||||
{ color: $article-warning-code-accent6 }
|
||||
.ss
|
||||
{ color: $article-warning-code-accent7 }
|
||||
// Javascript / Flux specific styles (duration and time values)
|
||||
.language-js {
|
||||
.mi + .nx, .mf + .nx { color: $article-warning-code-accent5; }
|
||||
}
|
||||
// SQL / InfluxQL specific styles (duration values)
|
||||
.language-sql {
|
||||
.mi + .n { color: $article-warning-code-accent5; }
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -78,18 +78,18 @@ $article-table-row-alt: $grey20;
|
|||
$article-table-scrollbar: $g0-obsidian;
|
||||
|
||||
// Article Notes, Warnings, & Messages
|
||||
$article-note-base: $gr-viridian;
|
||||
$article-note-base: $b-pool;
|
||||
$article-note-heading: $g20-white;
|
||||
$article-note-text: $gr-honeydew;
|
||||
$article-note-strong: $gr-krypton;
|
||||
$article-note-link: $gr-wasabi;
|
||||
$article-note-link-hover: $g20-white;
|
||||
$article-note-table-header: $grad-green-dark;
|
||||
$article-note-table-row-alt: #1a3c34;
|
||||
$article-note-table-scrollbar: #162627;
|
||||
$article-note-text: $b-neutrino;
|
||||
$article-note-strong: $g20-white;
|
||||
$article-note-link: $g20-white;
|
||||
$article-note-link-hover: $b-hydrogen;
|
||||
$article-note-table-header: $grad-blue-dark;
|
||||
$article-note-table-row-alt: #1b3a58;
|
||||
$article-note-table-scrollbar: #192a3a;
|
||||
$article-note-shadow: $g0-obsidian;
|
||||
$article-note-code: $br-galaxy;
|
||||
$article-note-code-bg: #040d0e;
|
||||
$article-note-code: $b-laser;
|
||||
$article-note-code-bg: #0a071c;
|
||||
$article-note-code-accent1: #567375;
|
||||
$article-note-code-accent2: $b-pool;
|
||||
$article-note-code-accent3: $gr-viridian;
|
||||
|
|
@ -97,53 +97,93 @@ $article-note-code-accent4: $r-ruby;
|
|||
$article-note-code-accent5: #ff6db0;
|
||||
$article-note-code-accent6: $b-pool;
|
||||
$article-note-code-accent7: #e90;
|
||||
$article-note-code-scrollbar: #162627;
|
||||
$article-note-code-scrollbar: #192a3a;
|
||||
|
||||
$article-warn-base: $r-dreamsicle;
|
||||
$article-warn-bg: #ff61851a;
|
||||
$article-warn-heading: $g20-white;
|
||||
$article-warn-text: $r-tungsten;
|
||||
$article-warn-link: $r-marmelade;
|
||||
$article-warn-link-hover: $g20-white;
|
||||
$article-warn-table-header: $grad-red;
|
||||
$article-warn-table-row-alt: #4a2a2a;
|
||||
$article-warn-table-scrollbar: #1f181b;
|
||||
$article-warn-shadow: #0d0610;
|
||||
$article-warn-code: #ec6e6e;
|
||||
$article-warn-code-bg: #0d0610;
|
||||
$article-warn-code-accent1: #844c4c;
|
||||
$article-warn-code-accent2: $b-pool;
|
||||
$article-warn-code-accent3: $gr-viridian;
|
||||
$article-warn-code-accent4: $r-ruby;
|
||||
$article-warn-code-accent5: #ffb4fb;
|
||||
$article-warn-code-accent6: $b-pool;
|
||||
$article-warn-code-accent7: #e90;
|
||||
$article-warn-code-scrollbar: #2a2025;
|
||||
$article-tip-base: $gr-viridian;
|
||||
$article-tip-heading: $g20-white;
|
||||
$article-tip-text: $gr-honeydew;
|
||||
$article-tip-strong: $g20-white;
|
||||
$article-tip-link: $gr-wasabi;
|
||||
$article-tip-link-hover: $g20-white;
|
||||
$article-tip-table-header: $grad-green-dark;
|
||||
$article-tip-table-row-alt: #1a3c34;
|
||||
$article-tip-table-scrollbar: #162627;
|
||||
$article-tip-shadow: $g0-obsidian;
|
||||
$article-tip-code: $br-galaxy;
|
||||
$article-tip-code-bg: #040d0e;
|
||||
$article-tip-code-accent1: #567375;
|
||||
$article-tip-code-accent2: $b-pool;
|
||||
$article-tip-code-accent3: $gr-viridian;
|
||||
$article-tip-code-accent4: $r-ruby;
|
||||
$article-tip-code-accent5: #ff6db0;
|
||||
$article-tip-code-accent6: $b-pool;
|
||||
$article-tip-code-accent7: #e90;
|
||||
$article-tip-code-scrollbar: #162627;
|
||||
|
||||
$article-cloud-base: $b-pool;
|
||||
$article-cloud-heading: $g20-white;
|
||||
$article-cloud-text: $b-neutrino;
|
||||
$article-cloud-link: $g20-white;
|
||||
$article-cloud-link-hover: $b-hydrogen;
|
||||
$article-cloud-table-header: $grad-blue-dark;
|
||||
$article-cloud-table-row-alt: #1b3a58;
|
||||
$article-cloud-table-scrollbar: #192a3a;
|
||||
$article-cloud-shadow: $g0-obsidian;
|
||||
$article-cloud-code: $b-laser;
|
||||
$article-cloud-code-bg: #0a071c;
|
||||
$article-cloud-code-accent1: #567375;
|
||||
$article-cloud-code-accent2: $b-pool;
|
||||
$article-cloud-code-accent3: $gr-viridian;
|
||||
$article-cloud-code-accent4: $r-ruby;
|
||||
$article-cloud-code-accent5: #ff6db0;
|
||||
$article-cloud-code-accent6: $b-pool;
|
||||
$article-cloud-code-accent7: #e90;
|
||||
$article-cloud-code-scrollbar: #192a3a;
|
||||
$article-important-base: $br-galaxy;
|
||||
$article-important-heading: $g20-white;
|
||||
$article-important-text: $cp-munchkin;
|
||||
$article-important-strong: $g20-white;
|
||||
$article-important-link: #797aff;
|
||||
$article-important-link-hover: $g20-white;
|
||||
$article-important-table-header: $grad-OminousFog;
|
||||
$article-important-table-row-alt: #1b1b33;
|
||||
$article-important-table-scrollbar: #56519e;
|
||||
$article-important-shadow: #11111d;
|
||||
$article-important-code: #9f92ff;
|
||||
$article-important-code-bg: #0a071c;
|
||||
$article-important-code-accent1: #5f51ac;
|
||||
$article-important-code-accent2: $br-pulsar;
|
||||
$article-important-code-accent3: #009a64;
|
||||
$article-important-code-accent4: $r-ruby;
|
||||
$article-important-code-accent5: #e24bbb;
|
||||
$article-important-code-accent6: #00a5eb;
|
||||
$article-important-code-accent7: #e90;
|
||||
$article-important-code-scrollbar: #56519e;
|
||||
|
||||
$article-enterprise-base: $br-galaxy;
|
||||
$article-enterprise-text: $cp-melrose;
|
||||
$article-enterprise-link: $cp-titan;
|
||||
$article-enterprise-link-hover: $g20-white;
|
||||
$article-warning-base: $r-dreamsicle;
|
||||
$article-warning-bg: #ff61851a;
|
||||
$article-warning-heading: $g20-white;
|
||||
$article-warning-text: $r-tungsten;
|
||||
$article-warning-strong: $g20-white;
|
||||
$article-warning-link: $r-marmelade;
|
||||
$article-warning-link-hover: $g20-white;
|
||||
$article-warning-table-header: $grad-red;
|
||||
$article-warning-table-row-alt: #4a2a2a;
|
||||
$article-warning-table-scrollbar: #1f181b;
|
||||
$article-warning-shadow: #0d0610;
|
||||
$article-warning-code: #ec6e6e;
|
||||
$article-warning-code-bg: #0d0610;
|
||||
$article-warning-code-accent1: #844c4c;
|
||||
$article-warning-code-accent2: $b-pool;
|
||||
$article-warning-code-accent3: $gr-viridian;
|
||||
$article-warning-code-accent4: $r-ruby;
|
||||
$article-warning-code-accent5: #ffb4fb;
|
||||
$article-warning-code-accent6: $b-pool;
|
||||
$article-warning-code-accent7: #e90;
|
||||
$article-warning-code-scrollbar: #2a2025;
|
||||
|
||||
$article-caution-base: $br-new-magenta;
|
||||
$article-caution-bg: rgba($br-new-magenta, .12);
|
||||
$article-caution-heading: $g20-white;
|
||||
$article-caution-text: #fe9dbc;
|
||||
$article-caution-strong: $g20-white;
|
||||
$article-caution-link: #f3d5e1;
|
||||
$article-caution-link-hover: $g20-white;
|
||||
$article-caution-table-header: $br-new-magenta, #ff598f;
|
||||
$article-caution-table-row-alt: #391b35;
|
||||
$article-caution-table-scrollbar: #912a58;
|
||||
$article-caution-shadow: #0f050e;
|
||||
$article-caution-code: #ff679b;
|
||||
$article-caution-code-bg: #0d0610;
|
||||
$article-caution-code-accent1: #8f3f5a;
|
||||
$article-caution-code-accent2: #3d97e5;
|
||||
$article-caution-code-accent3: #947eff;
|
||||
$article-caution-code-accent4: $r-ruby;
|
||||
$article-caution-code-accent5: #ffa0de;
|
||||
$article-caution-code-accent6: #3d97e5;
|
||||
$article-caution-code-accent7: #e90;
|
||||
$article-caution-code-scrollbar: #5c1b38;
|
||||
|
||||
// Article Tabs for tabbed content
|
||||
$article-tab-text: $g12-forge;
|
||||
|
|
|
|||
|
|
@ -78,73 +78,113 @@ $article-table-header: $grad-Miyazakisky !default;
|
|||
$article-table-row-alt: #f6f8ff !default;
|
||||
$article-table-scrollbar: $g14-chromium !default;
|
||||
|
||||
// Article Notes & Warnings
|
||||
$article-note-base: $gr-rainforest !default;
|
||||
$article-note-heading: $gr-emerald !default;
|
||||
$article-note-text: $gr-emerald !default;
|
||||
$article-note-strong: $gr-emerald !default;
|
||||
$article-note-link: $b-ocean !default;
|
||||
$article-note-link-hover: $br-magenta !default;
|
||||
$article-note-table-header: $grad-green-dark !default;
|
||||
$article-note-table-row-alt: #d6f5e9 !default;
|
||||
$article-note-table-scrollbar: #87DABE !default;
|
||||
$article-note-shadow: #8CB7AB !default;
|
||||
$article-note-code: #0A6F75 !default;
|
||||
$article-note-code-bg: #BDF3DA !default;
|
||||
$article-note-code-accent1: #6abba0 !default;
|
||||
$article-note-code-accent2: #0084d6 !default;
|
||||
$article-note-code-accent3: #5d52d6 !default;
|
||||
// Article Alert Blocks
|
||||
$article-note-base: $b-pool !default;
|
||||
$article-note-heading: $b-ocean !default;
|
||||
$article-note-text: $b-ocean !default;
|
||||
$article-note-strong: $b-ocean !default;
|
||||
$article-note-link: $p-void !default;
|
||||
$article-note-link-hover: $p-star !default;
|
||||
$article-note-table-header: $grad-blue !default;
|
||||
$article-note-table-row-alt: $g20-white !default;
|
||||
$article-note-table-scrollbar: $b-hydrogen !default;
|
||||
$article-note-shadow: #6EB8E4 !default;
|
||||
$article-note-code: $b-ocean !default;
|
||||
$article-note-code-bg: $b-neutrino !default;
|
||||
$article-note-code-accent1: #3CAAE2 !default;
|
||||
$article-note-code-accent2: $br-pulsar !default;
|
||||
$article-note-code-accent3: #048E66 !default;
|
||||
$article-note-code-accent4: $r-ruby !default;
|
||||
$article-note-code-accent5: #e24bbb !default;
|
||||
$article-note-code-accent6: #0084d6 !default;
|
||||
$article-note-code-accent7: #e90 !default;
|
||||
$article-note-code-scrollbar: #87DABE !default;
|
||||
$article-note-code-scrollbar: $b-laser !default;
|
||||
|
||||
$article-warn-base: $r-dreamsicle !default;
|
||||
$article-warn-bg: rgba($r-dreamsicle, .08) !default;
|
||||
$article-warn-heading: $r-fire !default;
|
||||
$article-warn-text: $r-curacao !default;
|
||||
$article-warn-link: $r-curacao !default;
|
||||
$article-warn-link-hover: $br-pulsar !default;
|
||||
$article-warn-table-header: $grad-red !default;
|
||||
$article-warn-table-row-alt: #ffe6df !default;
|
||||
$article-warn-table-scrollbar: #FFB1B1 !default;
|
||||
$article-warn-shadow: #b98a7d !default;
|
||||
$article-warn-code: #d0154e !default;
|
||||
$article-warn-code-bg: #fde6e5 !default;
|
||||
$article-warn-code-accent1: #fd99b8 !default;
|
||||
$article-warn-code-accent2: #357ae8 !default;
|
||||
$article-warn-code-accent3: #6c59cc !default;
|
||||
$article-warn-code-accent4: $r-ruby !default;
|
||||
$article-warn-code-accent5: #6a0a6f !default;
|
||||
$article-warn-code-accent6: #357ae8 !default;
|
||||
$article-warn-code-accent7: #e90 !default;
|
||||
$article-warn-code-scrollbar: #FFB1B1 !default;
|
||||
$article-tip-base: $gr-rainforest !default;
|
||||
$article-tip-heading: $gr-emerald !default;
|
||||
$article-tip-text: $gr-emerald !default;
|
||||
$article-tip-strong: $gr-emerald !default;
|
||||
$article-tip-link: $b-ocean !default;
|
||||
$article-tip-link-hover: $br-magenta !default;
|
||||
$article-tip-table-header: $grad-green-dark !default;
|
||||
$article-tip-table-row-alt: #d6f5e9 !default;
|
||||
$article-tip-table-scrollbar: #87DABE !default;
|
||||
$article-tip-shadow: #8CB7AB !default;
|
||||
$article-tip-code: #0A6F75 !default;
|
||||
$article-tip-code-bg: #BDF3DA !default;
|
||||
$article-tip-code-accent1: #6abba0 !default;
|
||||
$article-tip-code-accent2: #0084d6 !default;
|
||||
$article-tip-code-accent3: #5d52d6 !default;
|
||||
$article-tip-code-accent4: $r-ruby !default;
|
||||
$article-tip-code-accent5: #e24bbb !default;
|
||||
$article-tip-code-accent6: #0084d6 !default;
|
||||
$article-tip-code-accent7: #e90 !default;
|
||||
$article-tip-code-scrollbar: #87DABE !default;
|
||||
|
||||
$article-cloud-base: $b-laser !default;
|
||||
$article-cloud-heading: $b-ocean !default;
|
||||
$article-cloud-text: $b-ocean !default;
|
||||
$article-cloud-link: $b-ocean !default;
|
||||
$article-cloud-link-hover: $br-pulsar !default;
|
||||
$article-cloud-table-header: $grad-blue !default;
|
||||
$article-cloud-table-row-alt: $g20-white !default;
|
||||
$article-cloud-table-scrollbar: $b-hydrogen !default;
|
||||
$article-cloud-shadow: #6EB8E4 !default;
|
||||
$article-cloud-code: $b-ocean !default;
|
||||
$article-cloud-code-bg: $b-neutrino !default;
|
||||
$article-cloud-code-accent1: #3CAAE2 !default;
|
||||
$article-cloud-code-accent2: $br-pulsar !default;
|
||||
$article-cloud-code-accent3: #048E66 !default;
|
||||
$article-cloud-code-accent4: $r-ruby !default;
|
||||
$article-cloud-code-accent5: #e24bbb !default;
|
||||
$article-cloud-code-accent6: #0084d6 !default;
|
||||
$article-cloud-code-accent7: #e90 !default;
|
||||
$article-cloud-code-scrollbar: $b-laser !default;
|
||||
$article-important-base: $br-galaxy !default;
|
||||
$article-important-heading: $br-pulsar !default;
|
||||
$article-important-text: #5a55ad !default;
|
||||
$article-important-strong: $br-pulsar !default;
|
||||
$article-important-link: $b-ocean !default;
|
||||
$article-important-link-hover: $br-pulsar !default;
|
||||
$article-important-table-header: $grad-OminousFog !default;
|
||||
$article-important-table-row-alt: $g20-white !default;
|
||||
$article-important-table-scrollbar: $br-galaxy !default;
|
||||
$article-important-shadow: #b0a6eb !default;
|
||||
$article-important-code: $br-pulsar !default;
|
||||
$article-important-code-bg: #dadbff !default;
|
||||
$article-important-code-accent1: #998aeb !default;
|
||||
$article-important-code-accent2: $br-pulsar !default;
|
||||
$article-important-code-accent3: #026d33 !default;
|
||||
$article-important-code-accent4: $r-ruby !default;
|
||||
$article-important-code-accent5: #e24bbb !default;
|
||||
$article-important-code-accent6: #0084d6 !default;
|
||||
$article-important-code-accent7: #e90 !default;
|
||||
$article-important-code-scrollbar: $br-galaxy !default;
|
||||
|
||||
$article-enterprise-base: $br-galaxy !default;
|
||||
$article-enterprise-text: $cp-marguerite !default;
|
||||
$article-enterprise-link: $cp-marguerite !default;
|
||||
$article-enterprise-link-hover: $b-pool !default;
|
||||
$article-warning-base: $r-dreamsicle !default;
|
||||
$article-warning-bg: rgba($r-dreamsicle, .08) !default;
|
||||
$article-warning-heading: $r-fire !default;
|
||||
$article-warning-text: $r-curacao !default;
|
||||
$article-warning-strong: $br-pulsar !default;
|
||||
$article-warning-link: $r-curacao !default;
|
||||
$article-warning-link-hover: $br-pulsar !default;
|
||||
$article-warning-table-header: $grad-red !default;
|
||||
$article-warning-table-row-alt: #ffe6df !default;
|
||||
$article-warning-table-scrollbar: #FFB1B1 !default;
|
||||
$article-warning-shadow: #b98a7d !default;
|
||||
$article-warning-code: #d0154e !default;
|
||||
$article-warning-code-bg: #fde6e5 !default;
|
||||
$article-warning-code-accent1: #fd99b8 !default;
|
||||
$article-warning-code-accent2: #357ae8 !default;
|
||||
$article-warning-code-accent3: #6c59cc !default;
|
||||
$article-warning-code-accent4: $r-ruby !default;
|
||||
$article-warning-code-accent5: #6a0a6f !default;
|
||||
$article-warning-code-accent6: #357ae8 !default;
|
||||
$article-warning-code-accent7: #e90 !default;
|
||||
$article-warning-code-scrollbar: #FFB1B1 !default;
|
||||
|
||||
$article-caution-base: $br-new-magenta !default;
|
||||
$article-caution-bg: rgba($br-new-magenta, .08) !default;
|
||||
$article-caution-heading: $br-new-magenta !default;
|
||||
$article-caution-text: #db4692 !default;
|
||||
$article-caution-strong: $br-new-magenta !default;
|
||||
$article-caution-link: #8709d3 !default;
|
||||
$article-caution-link-hover: $b-sapphire !default;
|
||||
$article-caution-table-header: $br-new-magenta, #ff598f !default;
|
||||
$article-caution-table-row-alt: #fff8fc !default;
|
||||
$article-caution-table-scrollbar: #faa9cd !default;
|
||||
$article-caution-shadow: #f1b1cb !default;
|
||||
$article-caution-code: #d01578 !default;
|
||||
$article-caution-code-bg: #f3d5e1 !default;
|
||||
$article-caution-code-accent1: #e683b7 !default;
|
||||
$article-caution-code-accent2: #356ec8 !default;
|
||||
$article-caution-code-accent3: #5845b7 !default;
|
||||
$article-caution-code-accent4: $r-ruby !default;
|
||||
$article-caution-code-accent5: #6a0a6f !default;
|
||||
$article-caution-code-accent6: #357ae8 !default;
|
||||
$article-caution-code-accent7: #e90 !default;
|
||||
$article-caution-code-scrollbar: #faa9cd !default;
|
||||
|
||||
// Article Tabs for tabbed content
|
||||
$article-tab-text: $g8-storm !default;
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ body {
|
|||
@import "tools/icon-fonts/icomoon-v2";
|
||||
@import "tools/icon-fonts/icon-v3";
|
||||
@import "tools/icon-fonts/icon-v4";
|
||||
@import "tools/icon-fonts/alert-icons";
|
||||
|
||||
.v3 {font-family: 'icomoon-v3'}
|
||||
.v4 {font-family: 'icomoon-v4'}
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
@font-face {
|
||||
font-family: 'alert-icons';
|
||||
src: url('fonts/alert-icons.eot?d0dznh');
|
||||
src: url('fonts/alert-icons.eot?d0dznh#iefix') format('embedded-opentype'),
|
||||
url('fonts/alert-icons.ttf?d0dznh') format('truetype'),
|
||||
url('fonts/alert-icons.woff?d0dznh') format('woff'),
|
||||
url('fonts/alert-icons.svg?d0dznh#icomoon') format('svg');
|
||||
font-weight: normal;
|
||||
font-style: normal;
|
||||
font-display: block;
|
||||
}
|
||||
|
||||
[class^="alert-icon-"], [class*=" alert-icon-"] {
|
||||
/* use !important to prevent issues with browser extensions that change fonts */
|
||||
font-family: 'alert-icons' !important;
|
||||
speak: never;
|
||||
font-style: normal;
|
||||
font-weight: normal;
|
||||
font-variant: normal;
|
||||
text-transform: none;
|
||||
line-height: 1;
|
||||
|
||||
/* Better Font Rendering =========== */
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
}
|
||||
|
||||
.alert-icon-caution:before {
|
||||
content: "\43";
|
||||
}
|
||||
.alert-icon-important:before {
|
||||
content: "\49";
|
||||
}
|
||||
.alert-icon-note:before {
|
||||
content: "\4e";
|
||||
}
|
||||
.alert-icon-tip:before {
|
||||
content: "\54";
|
||||
}
|
||||
.alert-icon-warning:before {
|
||||
content: "\57";
|
||||
}
|
||||
|
|
@ -36,6 +36,7 @@ services:
|
|||
- local
|
||||
- lint
|
||||
cloud-pytest:
|
||||
container_name: cloud-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -85,6 +86,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
cloud-dedicated-pytest:
|
||||
container_name: cloud-dedicated-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -139,6 +141,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
cloud-serverless-pytest:
|
||||
container_name: cloud-serverless-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -188,6 +191,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
clustered-pytest:
|
||||
container_name: clustered-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -242,6 +246,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
telegraf-pytest:
|
||||
container_name: telegraf-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -290,6 +295,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
v2-pytest:
|
||||
container_name: v2-pytest
|
||||
image: influxdata/docs-pytest
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -339,6 +345,7 @@ services:
|
|||
target: /app/content
|
||||
working_dir: /app
|
||||
influxdb2:
|
||||
container_name: influxdb2
|
||||
image: influxdb:2
|
||||
ports:
|
||||
- 8086:8086
|
||||
|
|
@ -366,10 +373,10 @@ services:
|
|||
source: influxdb2-config
|
||||
target: /etc/influxdb2
|
||||
remark-lint:
|
||||
container_name: remark-lint
|
||||
build:
|
||||
context: .
|
||||
dockerfile: .ci/Dockerfile.remark
|
||||
command: ["remark", "${CONTENT_PATH}"]
|
||||
profiles:
|
||||
- lint
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -10,6 +10,19 @@ aliases:
|
|||
- /chronograf/v1/about_the_project/release-notes-changelog/
|
||||
---
|
||||
|
||||
## v1.10.6 {date="2024-12-16"}
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Prevent dangerous InfluxQL statements from automatically executing.
|
||||
- Fix Hosts page loading when there is a large number of connections.
|
||||
- Support InfluxDB Enterprise when using the Flux Query Builder.
|
||||
- Support metaqueries for InfluxDB Cloud Serverless environments.
|
||||
|
||||
### Dependency updates
|
||||
|
||||
- Upgrade Go to 1.22.7.
|
||||
|
||||
## v1.10.5 {date="2024-05-31"}
|
||||
|
||||
### Dependency updates
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
title: Manage Chronograf security
|
||||
description: Manage Chronograf security with OAuth 2.0 providers.
|
||||
aliases: /chronograf/v1/administration/security-best-practices/
|
||||
aliases:
|
||||
- /chronograf/v1/administration/security-best-practices/
|
||||
menu:
|
||||
chronograf_v1:
|
||||
name: Manage Chronograf security
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@ menu:
|
|||
name: Frequently asked questions (FAQs)
|
||||
weight: 10
|
||||
parent: Troubleshoot
|
||||
aliases:
|
||||
- /chronograf/latest/guides/transition-web-admin-interface/
|
||||
---
|
||||
|
||||
## How do I connect Chronograf to an InfluxDB Enterprise cluster?
|
||||
|
|
|
|||
|
|
@ -19,6 +19,39 @@ InfluxDB Enterprise builds are available. For more information, see
|
|||
[FIPS-compliant InfluxDB Enterprise builds](/enterprise_influxdb/v1/introduction/installation/fips-compliant/).
|
||||
{{% /note %}}
|
||||
|
||||
## v1.11.8 {date="2024-11-15"}
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Strip double quotes from measurement names in the [`/api/v2/delete` compatibility
|
||||
API](/enterprise_influxdb/v1/tools/api/#apiv2delete-http-endpoint) before
|
||||
string comparisons (e.g. to allow special characters in measurement names).
|
||||
- Enable SHA256 for FIPS RPMs.
|
||||
|
||||
## v1.11.7 {date="2024-09-19"}
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Log errors when RPC calls fail for `MetaExecutor` operations like
|
||||
`SHOW TAG VALUES` and `SHOW TAG KEYS`.
|
||||
- Prevent `GROUP BY` queries with an offset that crossed a DST boundary from failing.
|
||||
- Ensure `range()` filters correctly for all years.
|
||||
|
||||
### Features
|
||||
|
||||
- Run the `FIPS POST` if available and log the result.
|
||||
- Add support for LDAP over SSL (LDAPS).
|
||||
- Improve performance of `SHOW TAG VALUES` when using FGA by optimizing queries
|
||||
to be limited to only tag values the user has permission to access.
|
||||
|
||||
### Other
|
||||
|
||||
- Upgrade to Go 1.22.7.
|
||||
- Upgrade `jwt-go`.
|
||||
- Upgrade `dvsekhvalnov/jose2go` to v1.6.0.
|
||||
|
||||
---
|
||||
|
||||
## v1.11.6 {date="2024-08-02"}
|
||||
|
||||
### Bug Fixes
|
||||
|
|
|
|||
|
|
@ -11,33 +11,40 @@ menu:
|
|||
parent: Administration
|
||||
---
|
||||
|
||||
- [Overview](#overview)
|
||||
- [Backup and restore utilities](#backup-and-restore-utilities)
|
||||
- [Exporting and importing data](#exporting-and-importing-data)
|
||||
Use the InfluxDB Enterprise `backup`, `restore`, `export` and `import` utilities
|
||||
to prevent unexpected data loss and preserve the ability to restore data if it
|
||||
ever is lost.
|
||||
|
||||
## Overview
|
||||
|
||||
When deploying InfluxDB Enterprise in production environments, you should have a strategy and procedures for backing up and restoring your InfluxDB Enterprise clusters to be prepared for unexpected data loss.
|
||||
|
||||
The tools provided by InfluxDB Enterprise can be used to:
|
||||
You can use these tools in your back up and restore procedures to:
|
||||
|
||||
- Provide disaster recovery due to unexpected events
|
||||
- Migrate data to new environments or servers
|
||||
- Restore clusters to a consistent state
|
||||
- Debugging
|
||||
- Export and import data for debugging
|
||||
|
||||
Depending on the volume of data to be protected and your application requirements, InfluxDB Enterprise offers two methods, described below, for managing backups and restoring data:
|
||||
|
||||
- [Backup and restore utilities](#backup-and-restore-utilities) — For most applications
|
||||
- [Exporting and importing data](#exporting-and-importing-data) — For large datasets
|
||||
- [Backup and restore utilities](#backup-and-restore-utilities): Use for most applications
|
||||
- [Exporting and importing data](#exporting-and-importing-data): Use for very large datasets and to debug data
|
||||
|
||||
> **Note:** Use the [`backup` and `restore` utilities (InfluxDB OSS 1.5 and later)](/enterprise_influxdb/v1/administration/backup-and-restore/) to:
|
||||
> [!Note]
|
||||
> #### Back up and restore between InfluxDB Enterprise and OSS
|
||||
>
|
||||
> Use the `backup` and `restore` utilities in
|
||||
> [InfluxDB Enterprise](#backup-and-restore-utilities) and
|
||||
> [InfluxDB OSS (version 1.5 and later)](/influxdb/v1/administration/backup-and-restore/) to:
|
||||
>
|
||||
> - Restore InfluxDB Enterprise backup files to InfluxDB OSS instances.
|
||||
> - Back up InfluxDB OSS data that can be restored in InfluxDB Enterprise clusters.
|
||||
|
||||
## Backup and restore utilities
|
||||
|
||||
Use InfluxDB Enterprise back up and restore utilities to:
|
||||
|
||||
- Back up and restore multiple databases at a time.
|
||||
- Back up specific time ranges.
|
||||
- Create backup files compatible with InfluxDB OSS.
|
||||
|
||||
InfluxDB Enterprise supports backing up and restoring data in a cluster,
|
||||
a single database and retention policy, and single shards.
|
||||
Most InfluxDB Enterprise applications can use the backup and restore utilities.
|
||||
|
|
@ -46,11 +53,20 @@ Use the `backup` and `restore` utilities to back up and restore between `influxd
|
|||
instances with the same versions or with only minor version differences.
|
||||
For example, you can backup from {{< latest-patch version="1.10" >}} and restore on {{< latest-patch >}}.
|
||||
|
||||
- [Backup utility](#backup-utility)
|
||||
- [Examples](#examples)
|
||||
- [Restore utility](#restore-utility)
|
||||
- [Exporting and importing data](#exporting-and-importing-data)
|
||||
- [Exporting data](#exporting-data)
|
||||
- [Importing data](#importing-data)
|
||||
- [Example](#example)
|
||||
|
||||
### Backup utility
|
||||
|
||||
A backup creates a copy of the [metastore](/enterprise_influxdb/v1/concepts/glossary/#metastore) and [shard](/enterprise_influxdb/v1/concepts/glossary/#shard) data at that point in time and stores the copy in the specified directory.
|
||||
|
||||
Or, back up **only the cluster metastore** using the `-strategy only-meta` backup option. For more information, see [perform a metastore only backup](#perform-a-metastore-only-backup).
|
||||
To back up **only the cluster metastore**, use the `-strategy only-meta` backup option.
|
||||
For more information, see how to [perform a metastore only backup](#perform-a-metastore-only-backup).
|
||||
|
||||
All backups include a manifest, a JSON file describing what was collected during the backup.
|
||||
The filenames reflect the UTC timestamp of when the backup was created, for example:
|
||||
|
|
@ -62,14 +78,19 @@ The filenames reflect the UTC timestamp of when the backup was created, for exam
|
|||
Backups can be full, metastore only, or incremental, and they are incremental by default:
|
||||
|
||||
- **Full backup**: Creates a copy of the metastore and shard data.
|
||||
- **Incremental backup**: Creates a copy of the metastore and shard data that have changed since the last incremental backup. If there are no existing incremental backups, the system automatically performs a complete backup.
|
||||
- **Incremental backup**: Creates a copy of the metastore and shard data that have changed since the last backup (the most recent backup file in the specified directory).
|
||||
If no backups exist in the directory, then the system automatically performs a full backup.
|
||||
- **Metastore only backup**: Creates a copy of the metastore data only.
|
||||
|
||||
Restoring different types of backups requires different syntax.
|
||||
To prevent issues with [restore](#restore-utility), keep full backups, metastore only backups, and incremental backups in separate directories.
|
||||
|
||||
>**Note:** The backup utility copies all data through the meta node that is used to
|
||||
execute the backup. As a result, performance of a backup and restore is typically limited by the network IO of the meta node. Increasing the resources available to this meta node (such as resizing the EC2 instance) can significantly improve backup and restore performance.
|
||||
> [!Note]
|
||||
> #### Backup and restore performance
|
||||
>
|
||||
> The backup utility copies all data through the meta node that is used to execute the backup.
|
||||
> As a result, backup and restore performance is typically limited by the network IO of the meta node.
|
||||
> Increasing the resources available to this meta node (such as resizing the EC2 instance) can significantly improve backup and restore performance.
|
||||
|
||||
#### Syntax
|
||||
|
||||
|
|
@ -77,7 +98,8 @@ execute the backup. As a result, performance of a backup and restore is typicall
|
|||
influxd-ctl [global-options] backup [backup-options] <path-to-backup-directory>
|
||||
```
|
||||
|
||||
> **Note:** The `influxd-ctl backup` command exits with `0` for success and `1` for failure. If the backup fails, output can be directed to a log file to troubleshoot.
|
||||
If successful, the `influxd-ctl backup` command exits with `0` status;
|
||||
otherwise, it exits with error (`1`) status and returns a message that you can use for troubleshooting.
|
||||
|
||||
##### Global flags
|
||||
|
||||
|
|
@ -89,7 +111,7 @@ for a complete list of the global `influxd-ctl` flags.
|
|||
- `-db <string>`: name of the single database to back up
|
||||
- `-from <TCP-address>`: the data node TCP address to prefer when backing up
|
||||
- `-strategy`: select the backup strategy to apply during backup
|
||||
- `incremental`: _**(Default)**_ backup only data added since the previous backup.
|
||||
- `incremental`: _**(Default)**_ backup data that is new or changed since the previous backup.
|
||||
- `full` perform a full backup. Same as `-full`
|
||||
- `only-meta` perform a backup for meta data only: users, roles,
|
||||
databases, continuous queries, retention policies. Shards are not exported.
|
||||
|
|
@ -104,38 +126,47 @@ for a complete list of the global `influxd-ctl` flags.
|
|||
|
||||
#### Back up a database and all retention policies
|
||||
|
||||
Store the following incremental backups in different directories.
|
||||
The first backup specifies `-db myfirstdb` and the second backup specifies
|
||||
different options: `-db myfirstdb` and `-rp autogen`.
|
||||
The following example stores incremental backups of the database
|
||||
and all retention policies in the `./myfirstdb-allrp-backup` directory:
|
||||
|
||||
```bash
|
||||
influxd-ctl backup -db myfirstdb ./myfirstdb-allrp-backup
|
||||
```
|
||||
|
||||
#### Back up a database with a specific retention policy
|
||||
|
||||
The following example stores incremental backups in separate directories for the
|
||||
specified database and retention policy combinations.
|
||||
|
||||
```bash
|
||||
influxd-ctl backup -db myfirstdb -rp oneday ./myfirstdb-oneday-backup
|
||||
|
||||
influxd-ctl backup -db myfirstdb -rp autogen ./myfirstdb-autogen-backup
|
||||
```
|
||||
#### Back up a database with a specific retention policy
|
||||
|
||||
Store the following incremental backups in the same directory.
|
||||
Both backups specify the same `-db` flag and the same database.
|
||||
The output contains the status and backup file paths--for example:
|
||||
|
||||
```bash
|
||||
influxd-ctl backup -db myfirstdb ./myfirstdb-allrp-backup
|
||||
|
||||
influxd-ctl backup -db myfirstdb ./myfirstdb-allrp-backup
|
||||
```sh
|
||||
backing up db=myfirstdb rp=oneday shard=8 to <USER_HOME>/myfirstdb-oneday-backup/myfirstdb.oneday.00008.00
|
||||
backing up db=myfirstdb rp=autogen shard=10 to <USER_HOME>/myfirstdb-autogen-backup/myfirstdb.autogen.00010.00
|
||||
```
|
||||
|
||||
#### Back up data from a specific time range
|
||||
|
||||
To back up data in a specific time range, use the `-start` and `-end` options:
|
||||
|
||||
```bash
|
||||
influxd-ctl backup -db myfirstdb ./myfirstdb-jandata -start 2022-01-01T12:00:00Z -end 2022-01-31T11:59:00Z
|
||||
|
||||
```
|
||||
|
||||
#### Perform an incremental backup
|
||||
|
||||
Perform an incremental backup into the current directory with the command below.
|
||||
If there are any existing backups the current directory, the system performs an incremental backup.
|
||||
If there aren't any existing backups in the current directory, the system performs a backup of all data in InfluxDB.
|
||||
The incremental backup strategy (default, `-strategy=incremental`) checks for existing backup files in the specified directory.
|
||||
|
||||
- If a backup exists, `influxd-ctl` performs an incremental backup, saving only the data that has changed since the most recent backup file.
|
||||
- If no backup exists, it creates a full backup of all data in InfluxDB.
|
||||
|
||||
The following example shows how to run an incremental backup stored in the current directory:
|
||||
|
||||
```bash
|
||||
# Syntax
|
||||
|
|
@ -156,7 +187,7 @@ $ ls
|
|||
|
||||
#### Perform a full backup
|
||||
|
||||
Perform a full backup into a specific directory with the command below.
|
||||
The following example shows how to run a full backup stored in a specific directory.
|
||||
The directory must already exist.
|
||||
|
||||
```bash
|
||||
|
|
@ -178,7 +209,11 @@ $ ls backup_dir
|
|||
|
||||
#### Perform an incremental backup on a single database
|
||||
|
||||
Point at a remote meta server and back up only one database into a given directory (the directory must already exist):
|
||||
Use the `-bind` option to specify a remote [meta node](/enterprise_influxdb/v1/concepts/glossary/#meta-node) to connect to.
|
||||
|
||||
The following example shows how to connect to a remote meta server and back up
|
||||
a specific database into a given directory in the local system.
|
||||
The directory must already exist.
|
||||
|
||||
```bash
|
||||
# Syntax
|
||||
|
|
@ -195,7 +230,8 @@ $ ls ./telegrafbackup
|
|||
|
||||
#### Perform a metadata only backup
|
||||
|
||||
Perform a metadata only backup into a specific directory with the command below.
|
||||
The following example shows how to create and store a metadata-only backup
|
||||
in a specific directory.
|
||||
The directory must already exist.
|
||||
|
||||
```bash
|
||||
|
|
@ -213,10 +249,10 @@ Backed up to backup_dir in 51.388233ms, transferred 481 bytes
|
|||
|
||||
### Restore utility
|
||||
|
||||
#### Disable anti-entropy (AE) before restoring a backup
|
||||
|
||||
> [!Note]
|
||||
> #### Disable anti-entropy (AE) before restoring a backup
|
||||
>
|
||||
> Before restoring a backup, stop the anti-entropy (AE) service (if enabled) on **each data node in the cluster, one at a time**.
|
||||
|
||||
>
|
||||
> 1. Stop the `influxd` service.
|
||||
> 2. Set `[anti-entropy].enabled` to `false` in the influx configuration file (by default, influx.conf).
|
||||
|
|
@ -469,48 +505,78 @@ for [restoring from a full backup](#restore-from-a-full-backup).
|
|||
|
||||
## Exporting and importing data
|
||||
|
||||
For most InfluxDB Enterprise applications, the [backup and restore utilities](#backup-and-restore-utilities) provide the tools you need for your backup and restore strategy. However, in some cases, the standard backup and restore utilities may not adequately handle the volumes of data in your application.
|
||||
For most InfluxDB Enterprise applications, the [backup and restore utilities](#backup-and-restore-utilities) provide the tools you need for your backup and restore strategy. However, in some cases, the standard backup and restore utilities might not adequately handle the volumes of data in your application.
|
||||
|
||||
As an alternative to the standard backup and restore utilities, use the InfluxDB `influx_inspect export` and `influx -import` commands to create backup and restore procedures for your disaster recovery and backup strategy. These commands can be executed manually or included in shell scripts that run the export and import operations at scheduled intervals (example below).
|
||||
|
||||
- [Exporting data](#exporting-data)
|
||||
- [Importing data](#importing-data)
|
||||
- [Example: export and import for disaster recovery](#example-export-and-import-for-disaster-recovery)
|
||||
|
||||
### Exporting data
|
||||
|
||||
Use the [`influx_inspect export` command](/enterprise_influxdb/v1/tools/influx_inspect#export) to export data in line protocol format from your InfluxDB Enterprise cluster. Options include:
|
||||
Use the [`influx_inspect export` command](/enterprise_influxdb/v1/tools/influx_inspect#export) to export data in line protocol format from your InfluxDB Enterprise cluster.
|
||||
Options include the following:
|
||||
|
||||
- Exporting all, or specific, databases
|
||||
- Filtering with starting and ending timestamps
|
||||
- Using gzip compression for smaller files and faster exports
|
||||
- `-database`: Export all or specific databases
|
||||
- `-start` and `-end`: Filter with starting and ending timestamps
|
||||
- `-compress`: Use GNU zip (gzip) compression for smaller files and faster exports
|
||||
|
||||
For details on optional settings and usage, see [`influx_inspect export` command](/enterprise_influxdb/v1/tools/influx_inspect#export).
|
||||
|
||||
In the following example, the database is exported filtered to include only one day and compressed for optimal speed and file size.
|
||||
The following example shows how to export data filtered to one day and compressed
|
||||
for optimal speed and file size:
|
||||
|
||||
```bash
|
||||
influx_inspect export \
|
||||
-database myDB \
|
||||
-database DATABASE_NAME \
|
||||
-compress \
|
||||
-start 2019-05-19T00:00:00.000Z \
|
||||
-end 2019-05-19T23:59:59.999Z
|
||||
```
|
||||
|
||||
### Importing data
|
||||
The exported file contains the following:
|
||||
|
||||
After exporting the data in line protocol format, you can import the data using the [`influx -import` CLI command](/enterprise_influxdb/v1/tools/influx-cli/use-influx/#import-data-from-a-file-with--import).
|
||||
```sh
|
||||
# DDL
|
||||
CREATE DATABASE <DATABASE_NAME> WITH NAME <RETENTION_POLICY>
|
||||
# DML
|
||||
# CONTEXT-DATABASE:<DATABASE_NAME>
|
||||
# CONTEXT-RETENTION-POLICY:<RETENTION_POLICY>
|
||||
|
||||
In the following example, the compressed data file is imported into the specified database.
|
||||
|
||||
```bash
|
||||
influx -import -database myDB -compressed
|
||||
<LINE_PROTOCOL_DATA>
|
||||
```
|
||||
|
||||
For details on using the `influx -import` command, see [Import data from a file with -import](/enterprise_influxdb/v1/tools/influx-cli/use-influx/#import-data-from-a-file-with--import).
|
||||
- `DDL`: an InfluxQL `CREATE` statement to create the target database when [importing the data](#importing-data)
|
||||
- `DML`: Context metadata that specifies the target database and retention policy
|
||||
for [importing the data](#importing-data)
|
||||
- the line protocol data
|
||||
|
||||
### Example
|
||||
For details on optional settings and usage, see [`influx_inspect export` command](/enterprise_influxdb/v1/tools/influx_inspect#export).
|
||||
|
||||
### Importing data
|
||||
|
||||
For an example of using the exporting and importing data approach for disaster recovery, see the Capital One presentation from Influxdays 2019 on ["Architecting for Disaster Recovery."](https://www.youtube.com/watch?v=LyQDhSdnm4A). In this presentation, Capital One discusses the following:
|
||||
To import line protocol data from a file, use the [`influx -import` CLI command](/enterprise_influxdb/v1/tools/influx-cli/use-influx-cli/#influx-arguments).
|
||||
|
||||
- Exporting data every 15 minutes from an active cluster to an AWS S3 bucket.
|
||||
In your import file, include the following sections:
|
||||
|
||||
- _Optional_: **DDL (Data Definition Language)**: Contains the [InfluxQL commands](/enterprise_influxdb/v1/query_language/manage-database/) for creating the relevant [database](/enterprise_influxdb/v1/concepts/glossary/#database) and managing the [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp).
|
||||
If your database and retention policy already exist, your file can skip this section.
|
||||
- **DML (Data Manipulation Language)**: Context metadata that specifies the database and (if desired) retention policy for the import and contains the data in [line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol).
|
||||
|
||||
In the following example, the compressed data file (in GNU zip format) is imported into the database
|
||||
specified in the file's `DML` metadata.
|
||||
|
||||
```bash
|
||||
influx -import -path=path-to-file.gz -compressed
|
||||
```
|
||||
|
||||
For details on using the `influx -import` command, see [Import data from a file](/enterprise_influxdb/v1/tools/influx-cli/use-influx-cli/#import-data-from-a-file).
|
||||
|
||||
### Example: export and import for disaster recovery
|
||||
|
||||
For an example of using the exporting and importing data approach for disaster recovery, see the presentation from Influxdays 2019 on ["Architecting for Disaster Recovery."](https://www.youtube.com/watch?v=LyQDhSdnm4A). In this presentation, Capital One discusses the following:
|
||||
|
||||
- Exporting data every 15 minutes from an active InfluxDB Enterprise cluster to an AWS S3 bucket.
|
||||
- Replicating the export file in the S3 bucket using the AWS S3 copy command.
|
||||
- Importing data every 15 minutes from the AWS S3 bucket to a cluster available for disaster recovery.
|
||||
- Importing data every 15 minutes from the AWS S3 bucket to an InfluxDB Enterprise cluster available for disaster recovery.
|
||||
- Advantages of the export-import approach over the standard backup and restore utilities for large volumes of data.
|
||||
- Managing users and scheduled exports and imports with a custom administration tool.
|
||||
|
|
|
|||
|
|
@ -1650,21 +1650,25 @@ Number of queries allowed to execute concurrently.
|
|||
Default is `0`.
|
||||
|
||||
#### query-initial-memory-bytes
|
||||
|
||||
Initial bytes of memory allocated for a query.
|
||||
`0` means unlimited.
|
||||
Default is `0`.
|
||||
|
||||
#### query-max-memory-bytes
|
||||
|
||||
Maximum total bytes of memory allowed for an individual query.
|
||||
`0` means unlimited.
|
||||
Default is `0`.
|
||||
|
||||
#### total-max-memory-bytes
|
||||
|
||||
Maximum total bytes of memory allowed for all running Flux queries.
|
||||
`0` means unlimited.
|
||||
Default is `0`.
|
||||
|
||||
#### query-queue-size
|
||||
|
||||
Maximum number of queries allowed in execution queue.
|
||||
When queue limit is reached, new queries are rejected.
|
||||
`0` means unlimited.
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
title: Configure InfluxDB Enterprise meta modes
|
||||
title: Configure InfluxDB Enterprise meta nodes
|
||||
description: >
|
||||
  Configure InfluxDB Enterprise meta node settings and environment variables.
|
||||
menu:
|
||||
|
|
|
|||
|
|
@ -175,6 +175,21 @@ enabled = true
|
|||
# Finally, "none" does not use TLS. This is not recommended for
|
||||
# production systems.
|
||||
security = "starttls"
|
||||
|
||||
# Client certificates to present to the LDAP server are supported with
|
||||
# "client-tls-certificate" and "client-tls-private-key" configurations.
|
||||
# These are paths to the X.509 client certificate and corresponding private
|
||||
# key, respectively. If "client-tls-certificate" is set but
|
||||
# "client-tls-private-key" is not, then "client-tls-certificate" is assumed
|
||||
# to bundle both the certificate and private key.
|
||||
# The LDAP server may request and require valid client certificates
|
||||
# even when InfluxDB is configured with an insecure TLS mode that ignores
|
||||
# LDAP server certificate errors.
|
||||
# Not all LDAP servers will request a client certificate. It is not
|
||||
# necessary to set "client-tls-certificate" and "client-tls-private-key"
|
||||
# if the LDAP server does not require client certificates.
|
||||
client-tls-certificate = "/var/run/secrets/ldapClient.pem"
|
||||
client-tls-private-key = "/var/run/secrets/ldapClient.key"
|
||||
|
||||
# Client certificates to present to the LDAP server are supported with
|
||||
# "client-tls-certificate" and "client-tls-private-key" configurations.
|
||||
|
|
|
|||
|
|
@ -100,7 +100,7 @@ The following command lists every shard in our cluster:
|
|||
influxd-ctl show-shards
|
||||
```
|
||||
|
||||
The expected output is similar to the items in the codeblock below:
|
||||
The output is similar to the following:
|
||||
|
||||
```
|
||||
Shards
|
||||
|
|
@ -134,16 +134,19 @@ Take note of the cold shard's `ID` (for example: `22`) and the TCP address of
|
|||
one of its owners in the `Owners` column (for example:
|
||||
`enterprise-data-01:8088`).
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
Use the following command string to determine the size of the shards in
|
||||
your cluster:
|
||||
>
|
||||
find /var/lib/influxdb/data/ -mindepth 3 -type d -exec du -h {} \;
|
||||
> [!Note]
|
||||
>
|
||||
> To determine the size of shards in
|
||||
> your cluster, enter the following command:
|
||||
>
|
||||
> ```bash
|
||||
> find /var/lib/influxdb/data/ -mindepth 3 -type d -exec du -h {} \;
|
||||
> ```
|
||||
|
||||
In general, we recommend moving larger shards to the new data node to increase the
|
||||
available disk space on the original data nodes.
|
||||
Users should note that moving shards will impact network traffic.
|
||||
|
||||
_Moving shards will impact network traffic._
|
||||
|
||||
### Step 3: Copy Cold Shards
|
||||
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ Monitoring is the act of observing changes in data over time.
|
|||
There are multiple ways to monitor your InfluxDB Enterprise cluster.
|
||||
See the guides below to monitor a cluster using another InfluxDB instance.
|
||||
|
||||
Alternatively, to view your output data occasionally (_e.g._, for auditing or diagnostics),
|
||||
Alternatively, to view your output data occasionally (for example, for auditing or diagnostics),
|
||||
do one of the following:
|
||||
|
||||
- [Log and trace InfluxDB Enterprise operations](/enterprise_influxdb/v1/administration/monitor/logs/)
|
||||
|
|
@ -22,7 +22,7 @@ do one of the following:
|
|||
### Monitor with InfluxDB Insights
|
||||
For InfluxDB Enterprise customers, Insights is a free service that monitors your cluster and sends metrics to a private Cloud account. This allows InfluxDB Support to monitor your cluster health and access usage statistics when assisting with support tickets that you raise.
|
||||
|
||||
To apply for this service, please contact the [support team](https://support.influxdata.com/s/login/).
|
||||
To apply for this service, please [contact InfluxData support](https://support.influxdata.com).
|
||||
{{% /note %}}
|
||||
|
||||
{{< children >}}
|
||||
|
|
|
|||
|
|
@ -182,4 +182,4 @@ Send a notification to PagerDuty or HTTP endpoints (other webhooks) by [upgradin
|
|||
|
||||
For InfluxDB Enterprise customers, Insights is a free service that monitors your cluster and sends metrics to a private Cloud account. This allows InfluxDB Support to monitor your cluster health and access usage statistics when assisting with support tickets that you raise.
|
||||
|
||||
To apply for this service, please contact the [InfluxData Support team](mailto:support@influxdata.com).
|
||||
To apply for this service, please [contact InfluxData support](https://support.influxdata.com).
|
||||
|
|
|
|||
|
|
@ -449,8 +449,8 @@ Related entries: [tsm](#tsm-time-structured-merge-tree)
|
|||
|
||||
## web console
|
||||
|
||||
Legacy user interface for the InfluxDB Enterprise.
|
||||
Legacy user interface for InfluxDB Enterprise.
|
||||
|
||||
This interface has been deprecated. We recommend using [Chronograf](/chronograf/v1/introduction/).
|
||||
|
||||
If you are transitioning from the Enterprise Web Console to Chronograf, see how to [transition from the InfluxDB Web Admin Interface](/chronograf/v1/guides/transition-web-admin-interface/).
|
||||
If you are transitioning from the Enterprise Web Console to Chronograf, see how to [connect Chronograf to an InfluxDB Enterprise cluster](/chronograf/v1/troubleshooting/frequently-asked-questions/#how-do-i-connect-chronograf-to-an-influxdb-enterprise-cluster).
|
||||
|
|
|
|||
|
|
@ -22,9 +22,14 @@ Optimize your Flux queries to reduce their memory and compute (CPU) requirements
|
|||
- [Measure query performance with Flux profilers](#measure-query-performance-with-flux-profilers)
|
||||
|
||||
## Start queries with pushdowns
|
||||
**Pushdowns** are functions or function combinations that push data operations to the underlying data source rather than operating on data in memory. Start queries with pushdowns to improve query performance. Once a non-pushdown function runs, Flux pulls data into memory and runs all subsequent operations there.
|
||||
|
||||
**Pushdowns** are functions or function combinations that push data operations
|
||||
to the underlying data source rather than operating on data in memory.
|
||||
Start queries with pushdowns to improve query performance. Once a non-pushdown
|
||||
function runs, Flux pulls data into memory and runs all subsequent operations there.
|
||||
|
||||
#### Pushdown functions and function combinations
|
||||
|
||||
The following pushdowns are supported in InfluxDB Enterprise 1.10+.
|
||||
|
||||
| Functions | Supported |
|
||||
|
|
@ -63,6 +68,7 @@ Once a non-pushdown function runs, Flux pulls data into memory and runs all
|
|||
subsequent operations there.
|
||||
|
||||
##### Pushdown functions in use
|
||||
|
||||
```js
|
||||
from(bucket: "db/rp")
|
||||
|> range(start: -1h) //
|
||||
|
|
@ -75,6 +81,7 @@ from(bucket: "db/rp")
|
|||
```
|
||||
|
||||
### Avoid processing filters inline
|
||||
|
||||
Avoid using mathematic operations or string manipulation inline to define data filters.
|
||||
Processing filter values inline prevents `filter()` from pushing its operation down
|
||||
to the underlying data source, so data returned by the
|
||||
|
|
@ -104,12 +111,14 @@ from(bucket: "db/rp")
|
|||
```
|
||||
|
||||
## Avoid short window durations
|
||||
|
||||
Windowing (grouping data based on time intervals) is commonly used to aggregate and downsample data.
|
||||
Increase performance by avoiding short window durations.
|
||||
More windows require more compute power to evaluate which window each row should be assigned to.
|
||||
Reasonable window durations depend on the total time range queried.
|
||||
|
||||
## Use "heavy" functions sparingly
|
||||
|
||||
The following functions use more memory or CPU than others.
|
||||
Consider their necessity in your data processing before using them:
|
||||
|
||||
|
|
@ -120,6 +129,7 @@ Consider their necessity in your data processing before using them:
|
|||
- [pivot()](/influxdb/v2/reference/flux/stdlib/built-in/transformations/pivot/)
|
||||
|
||||
## Use set() instead of map() when possible
|
||||
|
||||
[`set()`](/influxdb/v2/reference/flux/stdlib/built-in/transformations/set/),
|
||||
[`experimental.set()`](/influxdb/v2/reference/flux/stdlib/experimental/set/),
|
||||
and [`map`](/influxdb/v2/reference/flux/stdlib/built-in/transformations/map/)
|
||||
|
|
@ -132,6 +142,7 @@ Use the following guidelines to determine which to use:
|
|||
- If dynamically setting a column value using **existing row data**, use `map()`.
|
||||
|
||||
#### Set a column value to a static value
|
||||
|
||||
The following queries are functionally the same, but using `set()` is more performant than using `map()`.
|
||||
|
||||
```js
|
||||
|
|
@ -144,12 +155,14 @@ data
|
|||
```
|
||||
|
||||
#### Dynamically set a column value using existing row data
|
||||
|
||||
```js
|
||||
data
|
||||
|> map(fn: (r) => ({ r with foo: r.bar }))
|
||||
```
|
||||
|
||||
## Balance time range and data precision
|
||||
|
||||
To ensure queries are performant, balance the time range and the precision of your data.
|
||||
For example, if you query data stored every second and request six months worth of data,
|
||||
results would include ≈15.5 million points per series.
|
||||
|
|
@ -160,7 +173,8 @@ Use [pushdowns](#pushdown-functions-and-function-combinations) to optimize how
|
|||
many points are stored in memory.
|
||||
|
||||
## Measure query performance with Flux profilers
|
||||
Use the [Flux Profiler package](/influxdb/v2/reference/flux/stdlib/profiler/)
|
||||
|
||||
Use the [Flux Profiler package](/flux/v0/stdlib/profiler/)
|
||||
to measure query performance and append performance metrics to your query output.
|
||||
The following Flux profilers are available:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,14 +1,13 @@
|
|||
---
|
||||
title: Query data with the InfluxDB API
|
||||
description: Query data with Flux and InfluxQL in the InfluxDB API.
|
||||
alias:
|
||||
-/docs/v1.8/query_language/querying_data/
|
||||
menu:
|
||||
enterprise_influxdb_v1:
|
||||
weight: 20
|
||||
parent: Guides
|
||||
aliases:
|
||||
- /enterprise_influxdb/v1/guides/querying_data/
|
||||
- /docs/v1.8/query_language/querying_data/
|
||||
v2: /influxdb/v2/query-data/
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -47,11 +47,26 @@ for high availability and redundancy.
|
|||
We typically recommend three meta nodes.
|
||||
If your servers have chronic communication or reliability issues, you can try adding nodes.
|
||||
|
||||
{{% note %}}
|
||||
Deploying multiple meta nodes on the same server is strongly discouraged
|
||||
since it creates a larger point of potential failure if that particular server is unresponsive.
|
||||
InfluxData recommends deploying meta nodes on relatively small footprint servers.
|
||||
{{% /note %}}
|
||||
> [!Note]
|
||||
>
|
||||
> #### Run meta nodes on separate servers
|
||||
>
|
||||
> Avoid deploying multiple meta nodes on the same server.
|
||||
> Doing so increases the risk of failure if the server becomes unresponsive.
|
||||
> InfluxData recommends deploying meta nodes on separate, low-resource servers
|
||||
> to minimize risks and optimize performance.
|
||||
>
|
||||
> #### Using a single meta node for non-production environments
|
||||
>
|
||||
> Installing and running InfluxDB Enterprise on a single server, or node, is an
|
||||
alternative to using [InfluxDB OSS 1.x](/influxdb/v1).
|
||||
> To start a {{% product-name %}} cluster with a single meta node,
|
||||
> pass the `-single-server` flag when starting the node.
|
||||
>
|
||||
> _A cluster with only one meta node is **not** recommended for
|
||||
> production environments._
|
||||
>
|
||||
> For more information, see how to [install InfluxDB Enterprise on a single server](/enterprise_influxdb/v1/introduction/installation/single-server/).
|
||||
|
||||
_See [Clustering in InfluxDB Enterprise](/enterprise_influxdb/v1/concepts/clustering/)
|
||||
for more information about cluster architecture._
|
||||
|
|
@ -292,19 +307,12 @@ Run the following command to search for a running `influxdb-meta` process:
|
|||
ps aux | grep -v grep | grep influxdb-meta
|
||||
```
|
||||
|
||||
You should see output similar to:
|
||||
The output is similar to the following:
|
||||
|
||||
```
|
||||
influxdb 3207 0.8 4.4 483000 22168 ? Ssl 17:05 0:08 /usr/bin/influxd-meta -config /etc/influxdb/influxdb-meta.conf
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
It is possible to start the cluster with a single meta node but you
|
||||
must pass the `-single-server` flag when starting the single meta node.
|
||||
Please note that a cluster with only one meta node is **not** recommended for
|
||||
production environments.
|
||||
{{% /note %}}
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
|
|
@ -350,9 +358,9 @@ enterprise-meta-02:8091 {{< latest-patch >}}-c{{< latest-patch >}}
|
|||
enterprise-meta-03:8091 {{< latest-patch >}}-c{{< latest-patch >}}
|
||||
```
|
||||
|
||||
Note that your cluster must have at least three meta nodes.
|
||||
_Your cluster must have at least three meta nodes.
|
||||
If you do not see your meta nodes in the output, retry adding them to
|
||||
the cluster.
|
||||
the cluster._
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ menu:
|
|||
parent: Install
|
||||
---
|
||||
|
||||
Installing and running InfluxDB Enterprise on a single server or node is an
|
||||
Installing and running InfluxDB Enterprise on a single server, or node, is an
|
||||
alternative to using [InfluxDB OSS 1.x](/influxdb/v1).
|
||||
InfluxDB Enterprise provides advanced functionality such as
|
||||
[LDAP authentication](/enterprise_influxdb/v1/administration/configure/security/ldap/),
|
||||
|
|
@ -75,11 +75,11 @@ through server restarts.
|
|||
## Set up, configure, and start the meta service
|
||||
|
||||
The InfluxDB Enterprise meta process oversees and manages the InfluxDB Enterprise
|
||||
data process. In multi-node clusters, meta nodes manage data syncing and high
|
||||
availability of data nodes. In a single-node installation, the meta process
|
||||
data process. In multi-node clusters, meta nodes (typically 3 nodes) manage data syncing and high
|
||||
availability of data nodes. In a single-server (single-node) installation, a meta process
|
||||
and the accompanying [`influxd-ctl` utility](/enterprise_influxdb/v1/tools/influxd-ctl/)
|
||||
still manage the "cluster", even though the meta and data processes exist on the
|
||||
same server.
|
||||
still manage the "cluster", but with a single meta node and the data
|
||||
processes running on the same server.
|
||||
|
||||
1. **Download and install the InfluxDB Enterprise meta service**:
|
||||
|
||||
|
|
@ -174,10 +174,11 @@ The `license-key` and `license-path` settings are mutually exclusive and one mus
|
|||
internal-shared-secret = "<internal-shared-secret>"
|
||||
```
|
||||
|
||||
3. **Start the InfluxDB Enterprise meta service**:
|
||||
3. **Start the InfluxDB Enterprise meta service in single-server mode**:
|
||||
|
||||
Run the command appropriate to your operating system's service manager.
|
||||
<!-- Include the `-single-server` flag when starting the service. -->
|
||||
Run the `start` command appropriate to your operating system's service manager.
|
||||
In the command, include the `-single-server` flag, which ensures that the single meta node
|
||||
is the leader and has all the metadata for the cluster.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs "small" %}}
|
||||
|
|
@ -185,24 +186,93 @@ The `license-key` and `license-path` settings are mutually exclusive and one mus
|
|||
[systemd](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
<!-----------------------------BEGIN SYSVINIT---------------------------------->
|
||||
Edit the `influxdb-meta` init script to include the `-single-server` flag:
|
||||
|
||||
<!-- Potential TODO: Add instructions for passing the -single-server flag to sysvinit -->
|
||||
1. Open the init script for editing, for example:
|
||||
|
||||
```sh
|
||||
service influxdb-meta start
|
||||
```
|
||||
```bash
|
||||
sudo nano /etc/init.d/influxdb-meta
|
||||
```
|
||||
|
||||
2. Find the section of the script that starts the `influxdb-meta` service and add the `-single-server` flag--for example:
|
||||
|
||||
```sh
|
||||
case "$1" in
|
||||
start)
|
||||
echo "Starting InfluxDB Meta..."
|
||||
/usr/bin/influxdb-meta -single-server &
|
||||
;;
|
||||
stop)
|
||||
echo "Stopping InfluxDB Meta..."
|
||||
killall influxdb-meta
|
||||
;;
|
||||
restart)
|
||||
$0 stop
|
||||
$0 start
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart}"
|
||||
exit 1
|
||||
esac
|
||||
|
||||
exit 0
|
||||
```
|
||||
|
||||
3. Restart the service to apply the changes:
|
||||
|
||||
```bash
|
||||
sudo service influxdb-meta restart
|
||||
```
|
||||
|
||||
For more information about sysvinit and initscripts, see the [sysvinit](https://wiki.gentoo.org/wiki/Sysvinit) Gentoo Linux documentation.
|
||||
<!-------------------------------END SYSVINIT---------------------------------->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!------------------------------BEGIN SYSTEMD---------------------------------->
|
||||
Edit the `influxdb-meta` service unit file or a drop-in configuration file to
|
||||
include the `-single-server` flag--for example:
|
||||
|
||||
<!-- Potential TODO: Add instructions for passing the -single-server flag to systemd -->
|
||||
1. Use `systemctl edit` with the `--drop-in` option to create the drop-in configuration file for the service:
|
||||
|
||||
```sh
|
||||
sudo systemctl start influxdb-meta
|
||||
```
|
||||
```bash
|
||||
sudo systemctl edit --drop-in influxdb-meta
|
||||
```
|
||||
|
||||
2. Add the following to the drop-in configuration file to include the `-single-server` flag in the startup command:
|
||||
|
||||
```systemd
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/influxdb-meta -single-server
|
||||
```
|
||||
|
||||
3. Start the service using `systemctl`:
|
||||
|
||||
```bash
|
||||
sudo systemctl start influxdb-meta
|
||||
```
|
||||
|
||||
4. Reload the systemd daemon to apply the changes:
|
||||
|
||||
```bash
|
||||
sudo systemctl daemon-reload
|
||||
```
|
||||
|
||||
5. Start the service using systemctl:
|
||||
|
||||
```bash
|
||||
sudo systemctl start influxdb-meta
|
||||
```
|
||||
|
||||
For more information about systemd unit files, see the Arch Linux documentation
|
||||
for [Writing unit files](https://wiki.archlinux.org/title/Systemd#Writing_unit_files)
|
||||
|
||||
<!--------------------------------END SYSTEMD---------------------------------->
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
4. **Ensure the `influxdb-meta` process is running**:
|
||||
1. **Ensure the `influxdb-meta` process is running**:
|
||||
|
||||
Use `ps aux` to list running processes and `grep` to filter the list of
|
||||
running process to those that contain `influxdb-meta` and filter out the
|
||||
|
|
@ -218,7 +288,7 @@ sudo systemctl start influxdb-meta
|
|||
influxdb 3207 0.8 4.4 483000 22168 ? Ssl 17:05 0:08 /usr/bin/influxd-meta -config /etc/influxdb/influxdb-meta.conf
|
||||
```
|
||||
|
||||
5. **Use `influxd-ctl` to add the meta process to the InfluxDB Enterprise "cluster"**:
|
||||
2. **Use `influxd-ctl` to add the meta process to the InfluxDB Enterprise "cluster"**:
|
||||
|
||||
```sh
|
||||
influxd-ctl add-meta <your-host-name>:8091
|
||||
|
|
@ -230,7 +300,7 @@ sudo systemctl start influxdb-meta
|
|||
Added meta node x at <your-host-name>:8091
|
||||
```
|
||||
|
||||
6. **Use `influxd-ctl` to verify the meta node was added to the InfluxDB Enterprise "cluster"**:
|
||||
3. **Use `influxd-ctl` to verify the meta node was added to the InfluxDB Enterprise "cluster"**:
|
||||
|
||||
```sh
|
||||
influxd-ctl show
|
||||
|
|
|
|||
|
|
@ -31,11 +31,13 @@ The following are the most frequently overlooked requirements when installing a
|
|||
- [Synchronize time between hosts](#synchronize-time-between-hosts)
|
||||
- [Use SSDs](#use-ssds)
|
||||
- [Do not use NFS or NFS-based services](#do-not-use-nfs-or-nfs-based-services)
|
||||
- [Do not use LVM](#do-not-use-lvm)
|
||||
- [Disable swap](#disable-swap)
|
||||
- [Use three and only three meta nodes](#use-three-and-only-three-meta-nodes)
|
||||
- [Meta and data nodes are fully independent](#meta-and-data-nodes-are-fully-independent)
|
||||
- [Install Chronograf last](#install-chronograf-last)
|
||||
|
||||
|
||||
#### Ensure connectivity between machines
|
||||
|
||||
All nodes in the cluster must be able to resolve each other by hostname or IP,
|
||||
|
|
@ -64,6 +66,13 @@ or services such as [AWS EFS](https://aws.amazon.com/efs/),
|
|||
[Google Filestore](https://cloud.google.com/filestore), or
|
||||
[Azure files](https://azure.microsoft.com/en-us/services/storage/files/).
|
||||
|
||||
#### Do not use LVM
|
||||
|
||||
Don't use LVM for software RAID, JBOD, or disk encryption.
|
||||
These use cases can lead to performance issues.
|
||||
|
||||
If you use LVM solely for creating logical volumes, use it with Device Mapper’s linear mapping for optimal performance.
|
||||
|
||||
#### Disable swap
|
||||
|
||||
To avoid potential disk contention when InfluxDB is under high load,
|
||||
|
|
|
|||
|
|
@ -249,7 +249,7 @@ curl --request POST "http://localhost:8086/api/v2/delete?bucket=exampleDB/autoge
|
|||
}'
|
||||
```
|
||||
|
||||
If you use the `predicate` option in your request, review [delete predicate syntax](/influxdb/latest/reference/syntax/delete-predicate/) and note its [limitations](/influxdb/latest/reference/syntax/delete-predicate/#limitations).
|
||||
If you use the `predicate` option in your request, review [delete predicate syntax](/influxdb/v2/reference/syntax/delete-predicate/) and note its [limitations](/influxdb/v2/reference/syntax/delete-predicate/#limitations).
|
||||
|
||||
## InfluxDB 1.x HTTP endpoints
|
||||
The following InfluxDB 1.x API endpoints are available:
|
||||
|
|
|
|||
|
|
@ -18,15 +18,15 @@ provides Flux syntax highlighting, autocompletion, and a direct InfluxDB server
|
|||
integration that lets you run Flux scripts natively and show results in VS Code.
|
||||
|
||||
{{% note %}}
|
||||
#### Enable Flux in InfluxDB 1.8
|
||||
To use the Flux VS Code extension with InfluxDB 1.8, ensure Flux is enabled in
|
||||
#### Enable Flux in InfluxDB 1.11
|
||||
To use the Flux VS Code extension with InfluxDB 1.11, ensure Flux is enabled in
|
||||
your InfluxDB configuration file.
|
||||
For more information, see [Enable Flux](/enterprise_influxdb/v1/flux/installation/).
|
||||
{{% /note %}}
|
||||
|
||||
##### On this page
|
||||
- [Install the Flux VS Code extension](#install-the-flux-vs-code-extension)
|
||||
- [Connect to InfluxDB 1.8](#connect-to-influxdb-18)
|
||||
- [Connect to InfluxDB 1.11](#connect-to-influxdb-111)
|
||||
- [Query InfluxDB from VS Code](#query-influxdb-from-vs-code)
|
||||
- [Explore your schema](#explore-your-schema)
|
||||
- [Debug Flux queries](#debug-flux-queries)
|
||||
|
|
@ -38,7 +38,7 @@ The Flux VS Code extension is available in the **Visual Studio Marketplace**.
|
|||
For information about installing extensions from the Visual Studio marketplace,
|
||||
see the [Extension Marketplace documentation](https://code.visualstudio.com/docs/editor/extension-gallery).
|
||||
|
||||
## Connect to InfluxDB 1.8
|
||||
## Connect to InfluxDB 1.11
|
||||
To create an InfluxDB connection in VS Code:
|
||||
|
||||
1. Open the **VS Code Command Pallet** ({{< keybind mac="⇧⌘P" other="Ctrl+Shift+P" >}}).
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ menu:
|
|||
v2: /influxdb/v2/reference/cli/influx/
|
||||
---
|
||||
|
||||
The `influx` command line interface (CLI) includes commands to manage many aspects of InfluxDB, including databases, organizations, users, and tasks.
|
||||
|
||||
The `influx` command line interface (CLI) provides an interactive shell for the HTTP API associated with `influxd`.
|
||||
It includes commands for writing and querying data, and managing many aspects of InfluxDB, including databases, organizations, and users.
|
||||
|
||||
## Usage
|
||||
|
||||
|
|
@ -17,7 +17,6 @@ The `influx` command line interface (CLI) includes commands to manage many aspec
|
|||
influx [flags]
|
||||
```
|
||||
|
||||
|
||||
## Flags {.no-shorthand}
|
||||
|
||||
| Flag | Description |
|
||||
|
|
|
|||
|
|
@ -1,40 +1,51 @@
|
|||
---
|
||||
title: Use influx - InfluxDB command line interface
|
||||
description: InfluxDB's command line interface (`influx`) is an interactive shell for the HTTP API.
|
||||
aliases:
|
||||
- /enterprise_influxdb/v1/tools/shell
|
||||
- /enterprise_influxdb/v1/tools/use-influx/
|
||||
menu:
|
||||
enterprise_influxdb_v1:
|
||||
name: Use influx
|
||||
name: Use influx CLI
|
||||
weight: 10
|
||||
parent: influx
|
||||
aliases:
|
||||
- /enterprise_influxdb/v1/tools/influx-cli/use-influx/
|
||||
- /enterprise_influxdb/v1/tools/shell
|
||||
- /enterprise_influxdb/v1/tools/use-influx/
|
||||
related:
|
||||
- /enterprise_influxdb/v1/administration/backup-and-restore/
|
||||
---
|
||||
|
||||
InfluxDB's command line interface (`influx`) is an interactive shell for the HTTP API.
|
||||
Use `influx` to write data (manually or from a file), query data interactively, and view query output in different formats.
|
||||
The `influx` command line interface (CLI) provides an interactive shell for the HTTP API associated with `influxd`.
|
||||
Use `influx` to write data (manually or from a file), query data interactively, view query output in different formats, and manage resources in InfluxDB.
|
||||
|
||||
* [Launch `influx`](#launch-influx)
|
||||
* [`influx` Arguments](#influx-arguments)
|
||||
* [`influx` Commands](#influx-commands)
|
||||
|
||||
## Launch `influx`
|
||||
If you [install](https://influxdata.com/downloads/) InfluxDB via a package manager, the CLI is installed at `/usr/bin/influx` (`/usr/local/bin/influx` on macOS).
|
||||
|
||||
The `influx` CLI is included when you [install InfluxDB Enterprise](/enterprise_influxdb/v1/introduction/installation/).
|
||||
|
||||
If you [install](https://influxdata.com/downloads/) InfluxDB via a package manager, the CLI is installed at `/usr/bin/influx` (`/usr/local/bin/influx` on macOS).
|
||||
|
||||
To access the CLI, first launch the `influxd` database process and then launch `influx` in your terminal.
|
||||
Once you've entered the shell and successfully connected to an InfluxDB node, you'll see the following output:
|
||||
<br>
|
||||
<br>
|
||||
|
||||
```bash
|
||||
$ influx
|
||||
Connected to http://localhost:8086 version {{< latest-patch >}}
|
||||
InfluxDB shell version: {{< latest-patch >}}
|
||||
influx
|
||||
```
|
||||
|
||||
> **Note:** The versions of InfluxDB and the CLI should be identical. If not, parsing issues can occur with queries.
|
||||
If successfully connected to an InfluxDB node, the output is the following:
|
||||
|
||||
You can now enter InfluxQL queries as well as some CLI-specific commands directly in your terminal.
|
||||
You can use `help` at any time to get a list of available commands. Use `Ctrl+C` to cancel if you want to cancel a long-running InfluxQL query.
|
||||
```bash
|
||||
Connected to http://localhost:8086 version {{< latest-patch >}}
|
||||
InfluxDB shell version: {{< latest-patch >}}
|
||||
>
|
||||
```
|
||||
|
||||
_The versions of InfluxDB and the CLI should be identical. If not, parsing issues can occur with queries._
|
||||
|
||||
In the prompt, you can enter InfluxQL queries as well as CLI-specific commands.
|
||||
Enter `help` to get a list of available commands.
|
||||
Use `Ctrl+C` to cancel if you want to cancel a long-running InfluxQL query.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
|
|
@ -67,11 +78,14 @@ List of host names that should **not** go through any proxy. If set to an asteri
|
|||
NO_PROXY=123.45.67.89,123.45.67.90
|
||||
```
|
||||
|
||||
## `influx` Arguments
|
||||
There are several arguments you can pass into `influx` when starting.
|
||||
List them with `$ influx --help`.
|
||||
The list below offers a brief discussion of each option.
|
||||
We provide detailed information on `-execute`, `-format`, and `-import` at the end of this section.
|
||||
## `influx` arguments
|
||||
|
||||
Arguments specify connection, write, import, and output options for the CLI session.
|
||||
|
||||
`influx` provides the following arguments:
|
||||
|
||||
`-h`, `-help`
|
||||
List `influx` arguments
|
||||
|
||||
`-compressed`
|
||||
Set to true if the import file is compressed.
|
||||
|
|
@ -96,8 +110,8 @@ The host to which `influx` connects.
|
|||
By default, InfluxDB runs on localhost.
|
||||
|
||||
`-import`
|
||||
Import new data from a file or import a previously [exported](https://github.com/influxdb/influxdb/blob/1.8/importer/README.md) database from a file.
|
||||
See [-import](#import-data-from-a-file-with--import).
|
||||
Import new data or [exported data](/enterprise_influxdb/v1/administration/backup-and-restore/#exporting-data) from a file.
|
||||
See [-import](#import-data-from-a-file).
|
||||
|
||||
`-password 'password'`
|
||||
The password `influx` uses to connect to the server.
|
||||
|
|
@ -107,7 +121,7 @@ variable.
|
|||
|
||||
`-path`
|
||||
The path to the file to import.
|
||||
Use with `-import`.
|
||||
Use with [-import](#import-data-from-a-file).
|
||||
|
||||
`-port 'port #'`
|
||||
The port to which `influx` connects.
|
||||
|
|
@ -141,6 +155,12 @@ Alternatively, set the username for the CLI with the `INFLUX_USERNAME` environme
|
|||
`-version`
|
||||
Display the InfluxDB version and exit.
|
||||
|
||||
The following sections provide detailed examples for some arguments, including `-execute`, `-format`, and `-import`.
|
||||
|
||||
- [Execute an InfluxQL command and quit with `-execute`](#execute-an-influxql-command-and-quit-with--execute)
|
||||
- [Specify the format of the server responses with `-format`](#specify-the-format-of-the-server-responses-with--format)
|
||||
- [Import data from a file](#import-data-from-a-file)
|
||||
|
||||
### Execute an InfluxQL command and quit with `-execute`
|
||||
|
||||
Execute queries that don't require a database specification:
|
||||
|
|
@ -243,18 +263,19 @@ $ influx -format=json -pretty
|
|||
}
|
||||
```
|
||||
|
||||
### Import data from a file with `-import`
|
||||
### Import data from a file
|
||||
|
||||
The import file has two sections:
|
||||
An import file has two sections:
|
||||
|
||||
* **DDL (Data Definition Language)**: Contains the [InfluxQL commands](/enterprise_influxdb/v1/query_language/manage-database/) for creating the relevant [database](/enterprise_influxdb/v1/concepts/glossary/) and managing the [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp).
|
||||
- _Optional_: **DDL (Data Definition Language)**: Contains the [InfluxQL commands](/enterprise_influxdb/v1/query_language/manage-database/) for creating the relevant [database](/enterprise_influxdb/v1/concepts/glossary/) and managing the [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp).
|
||||
If your database and retention policy already exist, your file can skip this section.
|
||||
* **DML (Data Manipulation Language)**: Lists the relevant database and (if desired) retention policy and contains the data in [line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol).
|
||||
- **DML (Data Manipulation Language)**: Context metadata that specifies the database and (if desired) retention policy for the import and contains the data in [line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol).
|
||||
|
||||
Example:
|
||||
#### Example: import data from a file
|
||||
|
||||
File (`datarrr.txt`):
|
||||
```
|
||||
The following `datarrr.txt` file is output using the [`influx_inspect export` command](/enterprise_influxdb/v1/tools/influx_inspect#export):
|
||||
|
||||
```text
|
||||
# DDL
|
||||
CREATE DATABASE pirates
|
||||
CREATE RETENTION POLICY oneday ON pirates DURATION 1d REPLICATION 1
|
||||
|
|
@ -270,13 +291,17 @@ treasures,captain_id=tetra value=47 1439856000
|
|||
treasures,captain_id=crunch value=109 1439858880
|
||||
```
|
||||
|
||||
Command:
|
||||
```
|
||||
$influx -import -path=datarrr.txt -precision=s
|
||||
To import the file, enter the following command in your terminal:
|
||||
|
||||
```bash
|
||||
influx -import -path=datarrr.txt -precision=s
|
||||
```
|
||||
|
||||
Results:
|
||||
```
|
||||
The data is imported into the database and retention policy specified in the `# DML`
|
||||
context metadata.
|
||||
The output is the following:
|
||||
|
||||
```text
|
||||
2015/12/22 12:25:06 Processed 2 commands
|
||||
2015/12/22 12:25:06 Processed 5 inserts
|
||||
2015/12/22 12:25:06 Failed 0 inserts
|
||||
|
|
@ -289,18 +314,20 @@ Results:
|
|||
> Time elapsed: 56.740578415s.
|
||||
> Points per second (PPS): 54634
|
||||
|
||||
Things to note about `-import`:
|
||||
Keep the following in mind when using `-import`:
|
||||
|
||||
* Allow the database to ingest points by using `-pps` to set the number of points per second allowed by the import. By default, pps is zero and `influx` does not throttle importing.
|
||||
* Imports work with `.gz` files, just include `-compressed` in the command.
|
||||
* Include timestamps in the data file. InfluxDB will assign the same timestamp to points without a timestamp. This can lead to unintended [overwrite behavior](/enterprise_influxdb/v1/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points).
|
||||
* If your data file has more than 5,000 points, it may be necessary to split that file into several files in order to write your data in batches to InfluxDB.
|
||||
We recommend writing points in batches of 5,000 to 10,000 points.
|
||||
Smaller batches, and more HTTP requests, will result in sub-optimal performance.
|
||||
By default, the HTTP request times out after five seconds.
|
||||
InfluxDB will still attempt to write the points after that time out but there will be no confirmation that they were successfully written.
|
||||
- To throttle the import, use `-pps` to set the number of points per second to ingest. By default, pps is zero and `influx` does not throttle importing.
|
||||
- To import a file compressed with `gzip` (GNU zip), include the `-compressed` flag.
|
||||
- Include timestamps in the data file.
|
||||
If points don’t include a timestamp, InfluxDB assigns the same timestamp to those points, which can result in unintended [duplicate points or overwrites](/enterprise_influxdb/v1/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points).
|
||||
- If your data file contains more than 5,000 points, consider splitting it into smaller files to write data to InfluxDB in batches.
|
||||
We recommend writing points in batches of 5,000 to 10,000 for optimal performance.
|
||||
Writing smaller batches increases the number of HTTP requests, which can negatively impact performance.
|
||||
By default, the HTTP request times out after five seconds. Although InfluxDB continues attempting to write the points after a timeout, you won’t receive confirmation of a successful write.
|
||||
|
||||
> **Note:** For how to export data from InfluxDB version 0.8.9, see [Exporting from 0.8.9](https://github.com/influxdb/influxdb/blob/1.8/importer/README.md).
|
||||
> **Note:** To export data from InfluxDB version 0.8.9, see [Exporting from 0.8.9](https://github.com/influxdb/influxdb/blob/1.8/importer/README.md).
|
||||
|
||||
For more information, see [exporting and importing data](/enterprise_influxdb/v1/administration/backup-and-restore/#exporting-and-importing-data).
|
||||
|
||||
## `influx` commands
|
||||
|
||||
|
|
@ -29,18 +29,21 @@ influx_inspect [ [ command ] [ options ] ]
|
|||
|
||||
The `influx_inspect` commands are summarized here, with links to detailed information on each of the commands.
|
||||
|
||||
* [`buildtsi`](#buildtsi): Converts in-memory (TSM-based) shards to TSI.
|
||||
* [`deletetsm`](#deletetsm): Bulk deletes a measurement from a raw TSM file.
|
||||
* [`dumptsi`](#dumptsi): Dumps low-level details about TSI files.
|
||||
* [`dumptsm`](#dumptsm): Dumps low-level details about TSM files.
|
||||
* [`dumptsmwal`](#dumptsmwal): Dump all data from a WAL file.
|
||||
* [`export`](#export): Exports raw data from a shard in InfluxDB line protocol format.
|
||||
* [`report`](#report): Displays a shard level report.
|
||||
* [`report-disk`](#report-disk): Reports disk usage by shard and measurement.
|
||||
* [`reporttsi`](#reporttsi): Reports on cardinality for measurements and shards.
|
||||
* [`verify`](#verify): Verifies the integrity of TSM files.
|
||||
* [`verify-seriesfile`](#verify-seriesfile): Verifies the integrity of series files.
|
||||
* [`verify-tombstone`](#verify-tombstone): Verifies the integrity of tombstones.
|
||||
- [`buildtsi`](#buildtsi): Converts in-memory (TSM-based) shards to TSI.
|
||||
- [`check-schema`](#check-schema): Checks for type conflicts between shards.
|
||||
- [`deletetsm`](#deletetsm): Bulk deletes a measurement from a raw TSM file.
|
||||
- [`dumptsi`](#dumptsi): Dumps low-level details about TSI files.
|
||||
- [`dumptsm`](#dumptsm): Dumps low-level details about TSM files.
|
||||
- [`dumptsmwal`](#dumptsmwal): Dump all data from a WAL file.
|
||||
- [`export`](#export): Exports raw data from a shard in InfluxDB line protocol format.
|
||||
- [`merge-schema`](#merge-schema): Merges a set of schema files from the `check-schema` command.
|
||||
- [`report`](#report): Displays a shard level report.
|
||||
- [`report-db`](#report-db): Estimates InfluxDB Cloud (TSM) cardinality for a database.
|
||||
- [`report-disk`](#report-disk): Reports disk usage by shard and measurement.
|
||||
- [`reporttsi`](#reporttsi): Reports on cardinality for measurements and shards.
|
||||
- [`verify`](#verify): Verifies the integrity of TSM files.
|
||||
- [`verify-seriesfile`](#verify-seriesfile): Verifies the integrity of series files.
|
||||
- [`verify-tombstone`](#verify-tombstone): Verifies the integrity of tombstones.
|
||||
|
||||
### `buildtsi`
|
||||
|
||||
|
|
@ -90,6 +93,9 @@ The name of the database.
|
|||
|
||||
The path to the `data` directory.
|
||||
|
||||
Default value is `$HOME/.influxdb/data`.
|
||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
||||
|
||||
##### `[ -max-cache-size ]`
|
||||
|
||||
The maximum size of the cache before it starts rejecting writes.
|
||||
|
|
@ -117,28 +123,56 @@ Flag to enable output in verbose mode.
|
|||
|
||||
The directory for the WAL (Write Ahead Log) files.
|
||||
|
||||
Default value is `$HOME/.influxdb/wal`.
|
||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
||||
|
||||
#### Examples
|
||||
|
||||
##### Converting all shards on a node
|
||||
|
||||
```
|
||||
$ influx_inspect buildtsi -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||
$ influx_inspect buildtsi -datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
||||
|
||||
```
|
||||
|
||||
##### Converting all shards for a database
|
||||
|
||||
```
|
||||
$ influx_inspect buildtsi -database mydb -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||
$ influx_inspect buildtsi -database mydb -datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
||||
|
||||
```
|
||||
|
||||
##### Converting a specific shard
|
||||
|
||||
```
|
||||
$ influx_inspect buildtsi -database stress -shard 1 -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||
$ influx_inspect buildtsi -database stress -shard 1 -datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
||||
```
|
||||
|
||||
### `check-schema`
|
||||
|
||||
Check for type conflicts between shards.
|
||||
|
||||
#### Syntax
|
||||
|
||||
```
|
||||
influx_inspect check-schema [ options ]
|
||||
```
|
||||
|
||||
#### Options
|
||||
|
||||
##### [ `-conflicts-file <string>` ]
|
||||
|
||||
Filename conflicts data should be written to. Default is `conflicts.json`.
|
||||
|
||||
##### [ `-path <string>` ]
|
||||
|
||||
Directory path where `fields.idx` files are located. Default is the current
|
||||
working directory `.`.
|
||||
|
||||
##### [ `-schema-file <string>` ]
|
||||
|
||||
Filename schema data should be written to. Default is `schema.json`.
|
||||
|
||||
### `deletetsm`
|
||||
|
||||
Use `deletetsm -measurement` to delete a measurement in a raw TSM file (from specified shards).
|
||||
|
|
@ -350,7 +384,9 @@ Default value is `""`.
|
|||
##### `-datadir <data_dir>`
|
||||
|
||||
The path to the `data` directory.
|
||||
Default value is `"$HOME/.influxdb/data"`.
|
||||
|
||||
Default value is `$HOME/.influxdb/data`.
|
||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
||||
|
||||
##### [ `-end <timestamp>` ]
|
||||
|
||||
|
|
@ -377,12 +413,12 @@ YYYY-MM-DDTHH:MM:SS+07:00
|
|||
|
||||
##### [ `-lponly` ]
|
||||
Output data in line protocol format only.
|
||||
Does not include comments or data definition language (DDL), like `CREATE DATABASE`.
|
||||
Does not output data definition language (DDL) statements (such as `CREATE DATABASE`) or DML context metadata (such as `# CONTEXT-DATABASE`).
|
||||
|
||||
##### [ `-out <export_dir>` ]
|
||||
|
||||
The location for the export file.
|
||||
Default value is `"$HOME/.influxdb/export"`.
|
||||
Default value is `$HOME/.influxdb/export`.
|
||||
|
||||
##### [ `-retention <rp_name> ` ]
|
||||
|
||||
|
|
@ -396,7 +432,9 @@ The timestamp string must be in [RFC3339 format](https://tools.ietf.org/html/rfc
|
|||
##### [ `-waldir <wal_dir>` ]
|
||||
|
||||
Path to the [WAL](/enterprise_influxdb/v1/concepts/glossary/#wal-write-ahead-log) directory.
|
||||
Default value is `"$HOME/.influxdb/wal"`.
|
||||
|
||||
Default value is `$HOME/.influxdb/wal`.
|
||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
||||
|
||||
#### Examples
|
||||
|
||||
|
|
@ -409,23 +447,43 @@ influx_inspect export -compress
|
|||
##### Export data from a specific database and retention policy
|
||||
|
||||
```bash
|
||||
influx_inspect export -database mydb -retention autogen
|
||||
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
|
||||
```
|
||||
|
||||
##### Output file
|
||||
|
||||
```bash
|
||||
# DDL
|
||||
CREATE DATABASE MY_DB_NAME
|
||||
CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1
|
||||
CREATE DATABASE DATABASE_NAME
|
||||
CREATE RETENTION POLICY <RETENTION_POLICY> ON <DATABASE_NAME> DURATION inf REPLICATION 1
|
||||
|
||||
# DML
|
||||
# CONTEXT-DATABASE:MY_DB_NAME
|
||||
# CONTEXT-RETENTION-POLICY:autogen
|
||||
# CONTEXT-DATABASE:DATABASE_NAME
|
||||
# CONTEXT-RETENTION-POLICY:RETENTION_POLICY
|
||||
randset value=97.9296104805 1439856000000000000
|
||||
randset value=25.3849066842 1439856100000000000
|
||||
```
|
||||
|
||||
### `merge-schema`
|
||||
|
||||
Merge a set of schema files from the [`check-schema` command](#check-schema).
|
||||
|
||||
#### Syntax
|
||||
|
||||
```
|
||||
influx_inspect merge-schema [ options ]
|
||||
```
|
||||
|
||||
#### Options
|
||||
|
||||
##### [ `-conflicts-file <string>` ]
|
||||
|
||||
Filename conflicts data should be written to. Default is `conflicts.json`.
|
||||
|
||||
##### [ `-schema-file <string>` ]
|
||||
|
||||
Filename for the output file. Default is `schema.json`.
|
||||
|
||||
### `report`
|
||||
|
||||
Displays series metadata for all shards.
|
||||
|
|
@ -461,6 +519,48 @@ The flag to report exact cardinality counts instead of estimates.
|
|||
Default value is `false`.
|
||||
Note: This can use a lot of memory.
|
||||
|
||||
### `report-db`
|
||||
|
||||
Use the `report-db` command to estimate the series cardinality of data in a
|
||||
database when migrated to InfluxDB Cloud (TSM). InfluxDB Cloud (TSM) includes
|
||||
field keys in the series key so unique field keys affect the total cardinality.
|
||||
The total series cardinality of data in an InfluxDB 1.x database may differ
|
||||
from the series cardinality of that same data when migrated to InfluxDB Cloud (TSM).
|
||||
|
||||
#### Syntax
|
||||
|
||||
```
|
||||
influx_inspect report-db [ options ]
|
||||
```
|
||||
|
||||
#### Options
|
||||
|
||||
##### [ `-c <int>` ]
|
||||
|
||||
Set worker concurrency. Default is `1`.
|
||||
|
||||
##### `-db-path <string>`
|
||||
|
||||
{{< req >}}: The path to the database.
|
||||
|
||||
##### [ `-detailed` ]
|
||||
|
||||
Include counts for fields and tags in the command output.
|
||||
|
||||
##### [ `-exact` ]
|
||||
|
||||
Report exact cardinality counts instead of estimates.
|
||||
This method of calculation can use a lot of memory.
|
||||
|
||||
##### [ `-rollup <string>` ]
|
||||
|
||||
Specify the cardinality "rollup" level--the granularity of the cardinality report:
|
||||
|
||||
- `t`: total
|
||||
- `d`: database
|
||||
- `r`: retention policy
|
||||
- `m`: measurement <em class="op65">(Default)</em>
|
||||
|
||||
### `report-disk`
|
||||
|
||||
Use the `report-disk` command to review TSM file disk usage per shard and measurement in a specified directory. Useful for capacity planning and identifying which measurement or shard is using the most disk space. The default directory path is `~/.influxdb/data/`.
|
||||
|
|
|
|||
1080
content/example.md
1080
content/example.md
File diff suppressed because it is too large
Load Diff
|
|
@ -62,4 +62,9 @@ option location = timezone.fixed(offset: -5h)
|
|||
option location = timezone.location(name: "America/Denver")
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> The `location` option only affects boundaries used for windowing, specifically around time shifts
|
||||
> like daylight savings. It does not change timestamps in the `_time` column, which are always UTC.
|
||||
|
||||
|
||||
{{< page-nav prev="/flux/v0/spec/variables/" next="/flux/v0/spec/types/" >}}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: influxdb.cardinality() function
|
||||
description: >
|
||||
`influxdb.cardinality()` returns the series cardinality of data stored in InfluxDB.
|
||||
`influxdb.cardinality()` returns the series cardinality of data retrieved from InfluxDB.
|
||||
menu:
|
||||
flux_v0_ref:
|
||||
name: influxdb.cardinality
|
||||
|
|
@ -28,9 +28,16 @@ Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md
|
|||
|
||||
------------------------------------------------------------------------------->
|
||||
|
||||
`influxdb.cardinality()` returns the series cardinality of data stored in InfluxDB.
|
||||
`influxdb.cardinality()` returns the series cardinality of data retrieved from InfluxDB.
|
||||
|
||||
|
||||
{{% note %}}
|
||||
Although this function is similar to InfluxQL's [`SHOW SERIES CARDINALITY`](/influxdb/v1/query_language/spec/#show-series-cardinality),
|
||||
it works in a slightly different manner.
|
||||
|
||||
`influxdb.cardinality()` is time bounded and reports the cardinality of data that matches the conditions passed into it rather than that of the bucket as a whole.
|
||||
{{% /note %}}
|
||||
|
||||
|
||||
##### Function type signature
|
||||
|
||||
|
|
@ -107,6 +114,12 @@ The cardinality calculation excludes points that match the specified start time.
|
|||
Use a relative duration or absolute time. For example, `-1h` or `2019-08-28T22:00:00Z`.
|
||||
Durations are relative to `now()`. Default is `now()`.
|
||||
|
||||
{{% note %}}
|
||||
The default value is `now()`, so any points that have been written into the future will
|
||||
not be counted unless a future `stop` date is provided.
|
||||
{{% /note %}}
|
||||
|
||||
|
||||
### predicate
|
||||
|
||||
Predicate function that filters records.
|
||||
|
|
@ -120,15 +133,17 @@ Default is `(r) => true`.
|
|||
- [Query series cardinality in a bucket](#query-series-cardinality-in-a-bucket)
|
||||
- [Query series cardinality in a measurement](#query-series-cardinality-in-a-measurement)
|
||||
- [Query series cardinality for a specific tag](#query-series-cardinality-for-a-specific-tag)
|
||||
- [Query series cardinality of data written in the last 4 hours](#query-series-cardinality-of-data-written-in-the-last-4-hours)
|
||||
|
||||
### Query series cardinality in a bucket
|
||||
|
||||
```js
|
||||
import "influxdata/influxdb"
|
||||
|
||||
influxdb.cardinality(bucket: "example-bucket", start: -1y)
|
||||
influxdb.cardinality(bucket: "example-bucket", start: time(v: 1))
|
||||
|
||||
```
|
||||
Note: If points have been written into the future, you will need to add an appropriate `stop` date.
|
||||
|
||||
|
||||
### Query series cardinality in a measurement
|
||||
|
|
@ -138,7 +153,7 @@ import "influxdata/influxdb"
|
|||
|
||||
influxdb.cardinality(
|
||||
bucket: "example-bucket",
|
||||
start: -1y,
|
||||
start: time(v: 1),
|
||||
predicate: (r) => r._measurement == "example-measurement",
|
||||
)
|
||||
|
||||
|
|
@ -150,7 +165,16 @@ influxdb.cardinality(
|
|||
```js
|
||||
import "influxdata/influxdb"
|
||||
|
||||
influxdb.cardinality(bucket: "example-bucket", start: -1y, predicate: (r) => r.exampleTag == "foo")
|
||||
influxdb.cardinality(bucket: "example-bucket", start: time(v: 1), predicate: (r) => r.exampleTag == "foo")
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Query series cardinality of data written in the last 4 hours
|
||||
```js
|
||||
import "influxdata/influxdb"
|
||||
|
||||
influxdb.cardinality(bucket: "example-bucket", start: -4h)
|
||||
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -77,3 +77,7 @@ option location = timezone.fixed(offset: -8h)
|
|||
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> The `location` option only affects boundaries used for windowing, specifically around time shifts
|
||||
> like daylight savings. It does not change timestamps in the `_time` column, which are always UTC.
|
||||
|
||||
|
|
|
|||
|
|
@ -75,3 +75,6 @@ option location = timezone.location(name: "America/Los_Angeles")
|
|||
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> The `location` option only affects boundaries used for windowing, specifically around time shifts
|
||||
> like daylight savings. It does not change timestamps in the `_time` column, which are always UTC.
|
||||
|
|
|
|||
|
|
@ -11,405 +11,9 @@ weight: 103
|
|||
influxdb/cloud-dedicated/tags: [storage]
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/reference/internals/storage-engine/
|
||||
source: /shared/v3-distributed-admin-custom-partitions/_index.md
|
||||
---
|
||||
|
||||
When writing data to {{< product-name >}}, the InfluxDB v3 storage engine stores
|
||||
data in the [Object store](/influxdb/cloud-dedicated/reference/internals/storage-engine/#object-store)
|
||||
in [Apache Parquet](https://parquet.apache.org/) format.
|
||||
Each Parquet file represents a _partition_--a logical grouping of data.
|
||||
By default, InfluxDB partitions each table by day.
|
||||
{{< product-name >}} lets you customize the partitioning strategy and partition
|
||||
by tag values and different time intervals.
|
||||
Customize your partitioning strategy to optimize query performance for your
|
||||
specific schema and workload.
|
||||
|
||||
- [Advantages](#advantages)
|
||||
- [Disadvantages](#disadvantages)
|
||||
- [Limitations](#limitations)
|
||||
- [How partitioning works](#how-partitioning-works)
|
||||
- [Partition templates](#partition-templates)
|
||||
- [Partition keys](#partition-keys)
|
||||
- [Partitions in the query life cycle](#partitions-in-the-query-life-cycle)
|
||||
- [Partition guides](#partition-guides)
|
||||
{{< children type="anchored-list" >}}
|
||||
|
||||
## Advantages
|
||||
|
||||
The primary advantage of custom partitioning is that it lets you customize your
|
||||
storage structure to improve query performance specific to your schema and workload.
|
||||
|
||||
- **Optimized storage for improved performance on specific types of queries**.
|
||||
For example, if queries often select data with a specific tag value, you can
|
||||
partition by that tag to improve the performance of those queries.
|
||||
- **Optimized storage for specific types of data**. For example, if the data you
|
||||
store is sparse and the time ranges you query are often much larger than a day,
|
||||
you could partition your data by week instead of by day.
|
||||
|
||||
## Disadvantages
|
||||
|
||||
Using custom partitioning may increase the load on other parts of the
|
||||
[InfluxDB v3 storage engine](/influxdb/cloud-dedicated/reference/internals/storage-engine/),
|
||||
but each can be scaled individually to address the added load.
|
||||
|
||||
{{% note %}}
|
||||
_The following disadvantages assume that your custom partitioning strategy includes
|
||||
additional tags to partition by or partition intervals smaller than a day._
|
||||
{{% /note %}}
|
||||
|
||||
- **Increased load on the [Ingester](/influxdb/cloud-dedicated/reference/internals/storage-engine/#ingester)**
|
||||
as it groups data into smaller partitions and files.
|
||||
- **Increased load on the [Catalog](/influxdb/cloud-dedicated/reference/internals/storage-engine/#catalog)**
|
||||
as more references to partition Parquet file locations are stored and queried.
|
||||
- **Increased load on the [Compactor](/influxdb/cloud-dedicated/reference/internals/storage-engine/#compactor)**
|
||||
as more partition Parquet files need to be compacted.
|
||||
- **Increased costs associated with [Object storage](/influxdb/cloud-dedicated/reference/internals/storage-engine/#object-storage)**
|
||||
as more partition Parquet files are created and stored.
|
||||
- **Risk of decreased performance for queries that don't use tags in the WHERE clause**.
|
||||
These queries may end up reading many partitions and smaller files, degrading performance.
|
||||
|
||||
## Limitations
|
||||
|
||||
Custom partitioning has the following limitations:
|
||||
|
||||
- Database and table partitions can only be defined on create.
|
||||
You cannot update the partition strategy of a database or table after it has
|
||||
been created.
|
||||
- You can partition by up to eight dimensions (seven tags and a time interval).
|
||||
|
||||
## How partitioning works
|
||||
|
||||
### Partition templates
|
||||
|
||||
A partition template defines the pattern used for _[partition keys](#partition-keys)_
|
||||
and determines the time interval that data is partitioned by.
|
||||
Partition templates use tag values and
|
||||
[Rust strftime date and time formatting syntax](https://docs.rs/chrono/latest/chrono/format/strftime/index.html).
|
||||
|
||||
_For more detailed information, see [Partition templates](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/)._
|
||||
|
||||
### Partition keys
|
||||
|
||||
A partition key uniquely identifies a partition. The structure of partition keys
|
||||
is defined by a _[partition template](#partition-templates)_. Partition keys are
|
||||
composed of up to eight parts or dimensions (tags, tag buckets, and time).
|
||||
Each part is delimited by the partition key separator (`|`).
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "View example partition templates and keys" %}}
|
||||
|
||||
Given the following line protocol with the following timestamps:
|
||||
|
||||
- 2023-12-31T23:00:00Z
|
||||
- 2024-01-01T00:00:00Z
|
||||
- 2024-01-01T01:00:00Z
|
||||
|
||||
```text
|
||||
production,line=A,station=cnc temp=81.2,qty=35i 1704063600000000000
|
||||
production,line=A,station=wld temp=92.8,qty=35i 1704063600000000000
|
||||
production,line=B,station=cnc temp=101.1,qty=43i 1704063600000000000
|
||||
production,line=B,station=wld temp=102.4,qty=43i 1704063600000000000
|
||||
production,line=A,station=cnc temp=81.9,qty=36i 1704067200000000000
|
||||
production,line=A,station=wld temp=110.0,qty=22i 1704067200000000000
|
||||
production,line=B,station=cnc temp=101.8,qty=44i 1704067200000000000
|
||||
production,line=B,station=wld temp=105.7,qty=44i 1704067200000000000
|
||||
production,line=A,station=cnc temp=82.2,qty=35i 1704070800000000000
|
||||
production,line=A,station=wld temp=92.1,qty=30i 1704070800000000000
|
||||
production,line=B,station=cnc temp=102.4,qty=43i 1704070800000000000
|
||||
production,line=B,station=wld temp=106.5,qty=43i 1704070800000000000
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 1 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `%Y-%m-%d` <em class="op50">time (by day, default format)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `2023-12-31`
|
||||
- `2024-01-01`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 1 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 2 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `line` <em class="op50">tag</em>
|
||||
- `%d %b %Y` <em class="op50">time (by day, non-default format)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `A | 31 Dec 2023`
|
||||
- `B | 31 Dec 2023`
|
||||
- `A | 01 Jan 2024`
|
||||
- `B | 01 Jan 2024`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 2 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 3 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `line` <em class="op50">tag</em>
|
||||
- `station` <em class="op50">tag</em>
|
||||
- `%Y-%m-%d` <em class="op50">time (by day, default format)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `A | cnc | 2023-12-31`
|
||||
- `A | wld | 2023-12-31`
|
||||
- `B | cnc | 2023-12-31`
|
||||
- `B | wld | 2023-12-31`
|
||||
- `A | cnc | 2024-01-01`
|
||||
- `A | wld | 2024-01-01`
|
||||
- `B | cnc | 2024-01-01`
|
||||
- `B | wld | 2024-01-01`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 3 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 4 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `line` <em class="op50">tag</em>
|
||||
- `station,3` <em class="op50">tag bucket</em>
|
||||
- `%Y-%m-%d` <em class="op50">time (by day, default format)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `A | 0 | 2023-12-31`
|
||||
- `B | 0 | 2023-12-31`
|
||||
- `A | 0 | 2024-01-01`
|
||||
- `B | 0 | 2024-01-01`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 4 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 5 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `line` <em class="op50">tag</em>
|
||||
- `station` <em class="op50">tag</em>
|
||||
- `%Y-%m-%d %H:00` <em class="op50">time (by hour)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `A | cnc | 2023-12-31 23:00`
|
||||
- `A | wld | 2023-12-31 23:00`
|
||||
- `B | cnc | 2023-12-31 23:00`
|
||||
- `B | wld | 2023-12-31 23:00`
|
||||
- `A | cnc | 2024-01-01 00:00`
|
||||
- `A | wld | 2024-01-01 00:00`
|
||||
- `B | cnc | 2024-01-01 00:00`
|
||||
- `B | wld | 2024-01-01 00:00`
|
||||
- `A | cnc | 2024-01-01 01:00`
|
||||
- `A | wld | 2024-01-01 01:00`
|
||||
- `B | cnc | 2024-01-01 01:00`
|
||||
- `B | wld | 2024-01-01 01:00`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 5 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
---
|
||||
|
||||
{{% flex %}}
|
||||
|
||||
<!---------------------- BEGIN PARTITION EXAMPLES GROUP 6 --------------------->
|
||||
|
||||
{{% flex-content "half" %}}
|
||||
|
||||
##### Partition template parts
|
||||
|
||||
- `line` <em class="op50">tag</em>
|
||||
- `station,50` <em class="op50">tag bucket</em>
|
||||
- `%Y-%m-%d %H:00` <em class="op50">time (by hour)</em>
|
||||
|
||||
{{% /flex-content %}}
|
||||
{{% flex-content %}}
|
||||
|
||||
##### Partition keys
|
||||
|
||||
- `A | 47 | 2023-12-31 23:00`
|
||||
- `A | 9 | 2023-12-31 23:00`
|
||||
- `B | 47 | 2023-12-31 23:00`
|
||||
- `B | 9 | 2023-12-31 23:00`
|
||||
- `A | 47 | 2024-01-01 00:00`
|
||||
- `A | 9 | 2024-01-01 00:00`
|
||||
- `B | 47 | 2024-01-01 00:00`
|
||||
- `B | 9 | 2024-01-01 00:00`
|
||||
- `A | 47 | 2024-01-01 01:00`
|
||||
- `A | 9 | 2024-01-01 01:00`
|
||||
- `B | 47 | 2024-01-01 01:00`
|
||||
- `B | 9 | 2024-01-01 01:00`
|
||||
|
||||
{{% /flex-content %}}
|
||||
|
||||
<!----------------------- END PARTITION EXAMPLES GROUP 6 ---------------------->
|
||||
|
||||
{{% /flex %}}
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
## Partitions in the query life cycle
|
||||
|
||||
When querying data:
|
||||
|
||||
1. The [Catalog](/influxdb/cloud-dedicated/reference/internals/storage-engine/#catalog)
|
||||
provides the v3 query engine ([Querier](/influxdb/cloud-dedicated/reference/internals/storage-engine/#querier))
|
||||
with the locations of partitions that contain the queried time series data.
|
||||
2. The query engine reads all rows in the returned partitions to identify what
|
||||
rows match the logic in the query and should be included in the query result.
|
||||
|
||||
The faster the query engine can identify what partitions to read and then read
|
||||
the data in those partitions, the more performant queries are.
|
||||
|
||||
_For more information about the query lifecycle, see
|
||||
[InfluxDB v3 query life cycle](/influxdb/cloud-dedicated/reference/internals/storage-engine/#query-life-cycle)._
|
||||
|
||||
##### Query example
|
||||
|
||||
Consider the following query that selects everything in the `production` table
|
||||
where the `line` tag is `A` and the `station` tag is `cnc`:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM production
|
||||
WHERE
|
||||
time >= now() - INTERVAL '1 week'
|
||||
AND line = 'A'
|
||||
AND station = 'cnc'
|
||||
```
|
||||
|
||||
Using the default partitioning strategy (by day), the query engine
|
||||
reads eight separate partitions (one partition for today and one for each of the
|
||||
last seven days):
|
||||
|
||||
- {{< datetime/current-date trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-1 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-2 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-3 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-4 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-5 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-6 trimTime=true >}}
|
||||
- {{< datetime/current-date offset=-7 trimTime=true >}}
|
||||
|
||||
The query engine must scan _all_ rows in the partitions to identify rows
|
||||
where `line` is `A` and `station` is `cnc`. This process takes valuable time
|
||||
and results in less performant queries.
|
||||
|
||||
However, if you partition by other tags, InfluxDB can identify partitions that
|
||||
contain only the tag values your query needs and spend less time
|
||||
scanning rows to see if they contain the tag values.
|
||||
|
||||
For example, if data is partitioned by `line`, `station`, and day, although
|
||||
there are more partition files, the query engine can quickly identify and read
|
||||
only those with data relevant to the query:
|
||||
|
||||
{{% columns 4 %}}
|
||||
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-1 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-1 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-1 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-1 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-2 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-2 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-2 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-2 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-3 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-3 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-3 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-3 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-4 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-4 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-4 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-4 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-5 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-5 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-5 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-5 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-6 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-6 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-6 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-6 trimTime=true >}}
|
||||
- <strong class="req normal green">A | cnc | {{< datetime/current-date offset=-7 trimTime=true >}}</strong>
|
||||
- A | wld | {{< datetime/current-date offset=-7 trimTime=true >}}
|
||||
- B | cnc | {{< datetime/current-date offset=-7 trimTime=true >}}
|
||||
- B | wld | {{< datetime/current-date offset=-7 trimTime=true >}}
|
||||
|
||||
{{% /columns %}}
|
||||
|
||||
---
|
||||
|
||||
## Partition guides
|
||||
|
||||
{{< children >}}
|
||||
<!--
|
||||
The content of this page is at /content/shared/v3-distributed-admin-custom-partitions/_index.md
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -8,52 +8,9 @@ menu:
|
|||
name: Best practices
|
||||
parent: Manage data partitioning
|
||||
weight: 202
|
||||
source: /shared/v3-distributed-admin-custom-partitions/best-practices.md
|
||||
---
|
||||
|
||||
Use the following best practices when defining custom partitioning strategies
|
||||
for your data stored in {{< product-name >}}.
|
||||
|
||||
- [Partition by tags that you commonly query for a specific value](#partition-by-tags-that-you-commonly-query-for-a-specific-value)
|
||||
- [Only partition by tags that _always_ have a value](#only-partition-by-tags-that-always-have-a-value)
|
||||
- [Avoid over-partitioning](#avoid-over-partitioning)
|
||||
|
||||
## Partition by tags that you commonly query for a specific value
|
||||
|
||||
Custom partitioning primarily benefits queries that look for a specific tag
|
||||
value in the `WHERE` clause. For example, if you often query data related to a
|
||||
specific ID, partitioning by the tag that stores the ID helps the InfluxDB
|
||||
query engine to more quickly identify what partitions contain the relevant data.
|
||||
|
||||
{{% note %}}
|
||||
|
||||
#### Use tag buckets for high-cardinality tags
|
||||
|
||||
Partitioning using distinct values of tags with many (10K+) unique values can
|
||||
actually hurt query performance as partitions are created for each unique tag value.
|
||||
Instead, use [tag buckets](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates)
|
||||
to partition by high-cardinality tags.
|
||||
This method of partitioning groups tag values into "buckets" and partitions by bucket.
|
||||
{{% /note %}}
|
||||
|
||||
## Only partition by tags that _always_ have a value
|
||||
|
||||
You should only partition by tags that _always_ have a value.
|
||||
If points don't have a value for the tag, InfluxDB can't store them in the correct partitions and, at query time, must read all the partitions.
|
||||
|
||||
## Avoid over-partitioning
|
||||
|
||||
As you plan your partitioning strategy, keep in mind that data can be
|
||||
"over-partitioned"--meaning partitions are so granular that queries end up
|
||||
having to retrieve and read many partitions from the object store, which
|
||||
hurts query performance.
|
||||
|
||||
- Avoid using partition time intervals that are **less than one day**.
|
||||
|
||||
The partition time interval should be balanced with the actual amount of data
|
||||
written during each interval. If a single interval doesn't contain a lot of data,
|
||||
it is better to partition by larger time intervals.
|
||||
|
||||
- Don't partition by tags that you typically don't use in your query workload.
|
||||
- Don't partition by distinct values of high-cardinality tags.
|
||||
Instead, [use tag buckets](#use-tag-buckets-for-high-cardinality-tags) to
|
||||
partition by these tags.
|
||||
<!--
|
||||
The content of this page is at /content/shared/v3-distributed-admin-custom-partitions/best-practices.md
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -10,162 +10,9 @@ weight: 202
|
|||
related:
|
||||
- /influxdb/cloud-dedicated/reference/cli/influxctl/database/create/
|
||||
- /influxdb/cloud-dedicated/reference/cli/influxctl/table/create/
|
||||
source: /shared/v3-distributed-admin-custom-partitions/define-custom-partitions.md
|
||||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
to define custom partition strategies when creating a database or table.
|
||||
By default, {{< product-name >}} partitions data by day.
|
||||
|
||||
The partitioning strategy of a database or table is determined by a
|
||||
[partition template](/influxdb/cloud-dedicated/admin/custom-partitions/#partition-templates)
|
||||
which defines the naming pattern for [partition keys](/influxdb/cloud-dedicated/admin/custom-partitions/#partition-keys).
|
||||
Partition keys uniquely identify each partition.
|
||||
When a partition template is applied to a database, it becomes the default template
|
||||
for all tables in that database, but can be overridden when creating a
|
||||
table.
|
||||
|
||||
- [Create a database with a custom partition template](#create-a-database-with-a-custom-partition-template)
|
||||
- [Create a table with a custom partition template](#create-a-table-with-a-custom-partition-template)
|
||||
- [Example partition templates](#example-partition-templates)
|
||||
|
||||
{{% note %}}
|
||||
|
||||
#### Partition templates can only be applied on create
|
||||
|
||||
You can only apply a partition template when creating a database or table.
|
||||
You can't update a partition template on an existing resource.
|
||||
{{% /note %}}
|
||||
|
||||
Use the following command flags to identify
|
||||
[partition template parts](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates):
|
||||
|
||||
- `--template-tag`: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
|
||||
to use in the partition template.
|
||||
- `--template-tag-bucket`: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
|
||||
and number of "buckets" to group tag values into.
|
||||
Provide the tag key and the number of buckets to bucket tag values into
|
||||
separated by a comma: `tagKey,N`.
|
||||
- `--template-timeformat`: A [Rust strftime date and time](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
|
||||
string that specifies the time format in the partition template and determines
|
||||
the time interval to partition by.
|
||||
|
||||
{{% note %}}
|
||||
A partition template can include up to 7 total tag and tag bucket parts
|
||||
and only 1 time part.
|
||||
{{% /note %}}
|
||||
|
||||
_View [partition template part restrictions](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#restrictions)._
|
||||
|
||||
{{% note %}}
|
||||
#### Always provide a time format when using custom partitioning
|
||||
|
||||
When defining a custom partition template for your database or table using any
|
||||
of the `influxctl` `--template-*` flags, always include the `--template-timeformat`
|
||||
flag with a time format to use in your partition template.
|
||||
Otherwise, InfluxDB omits time from the partition template and won't compact partitions.
|
||||
{{% /note %}}
|
||||
|
||||
## Create a database with a custom partition template
|
||||
|
||||
The following example creates a new `example-db` database and applies a partition
|
||||
template that partitions by distinct values of two tags (`room` and `sensor-type`),
|
||||
bucketed values of the `customerID` tag, and by week using the time format `%Y wk:%W`:
|
||||
|
||||
<!--Skip database create and delete tests: namespaces aren't reusable-->
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxctl database create \
|
||||
--template-tag room \
|
||||
--template-tag sensor-type \
|
||||
--template-tag-bucket customerID,500 \
|
||||
--template-timeformat '%Y wk:%W' \
|
||||
example-db
|
||||
```
|
||||
|
||||
## Create a table with a custom partition template
|
||||
|
||||
The following example creates a new `example-table` table in the specified
|
||||
database and applies a partition template that partitions by distinct values of
|
||||
two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag,
|
||||
and by month using the time format `%Y-%m`:
|
||||
|
||||
<!--Skip database create and delete tests: namespaces aren't reusable-->
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
{{% code-placeholders "DATABASE_NAME" %}}
|
||||
|
||||
```sh
|
||||
influxctl table create \
|
||||
--template-tag room \
|
||||
--template-tag sensor-type \
|
||||
--template-tag-bucket customerID,500 \
|
||||
--template-timeformat '%Y-%m' \
|
||||
DATABASE_NAME \
|
||||
example-table
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
Replace the following in your command:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/cloud-dedicated/admin/databases/)
|
||||
|
||||
<!--actual test
|
||||
|
||||
```sh
|
||||
|
||||
# Test the preceding command outside of the code block.
|
||||
# influxctl authentication requires TTY interaction--
|
||||
# output the auth URL to a file that the host can open.
|
||||
|
||||
TABLE_NAME=table_TEST_RUN
|
||||
script -c "influxctl table create \
|
||||
--template-tag room \
|
||||
--template-tag sensor-type \
|
||||
--template-tag-bucket customerID,500 \
|
||||
--template-timeformat '%Y-%m' \
|
||||
DATABASE_NAME \
|
||||
$TABLE_NAME" \
|
||||
/dev/null > /shared/urls.txt
|
||||
|
||||
script -c "influxctl query \
|
||||
--database DATABASE_NAME \
|
||||
--token DATABASE_TOKEN \
|
||||
'SHOW TABLES'" > /shared/temp_tables.txt
|
||||
grep -q $TABLE_NAME /shared/temp_tables.txt
|
||||
rm /shared/temp_tables.txt
|
||||
```
|
||||
|
||||
<!--
|
||||
The content of this page is at /content/shared/v3-distributed-admin-custom-partitions/_define-custom-partitions.md
|
||||
-->
|
||||
|
||||
## Example partition templates
|
||||
|
||||
Given the following [line protocol](/influxdb/cloud-dedicated/reference/syntax/line-protocol/)
|
||||
with a `2024-01-01T00:00:00Z` timestamp:
|
||||
|
||||
```text
|
||||
prod,line=A,station=weld1 temp=81.9,qty=36i 1704067200000000000
|
||||
```
|
||||
|
||||
##### Partitioning by distinct tag values
|
||||
|
||||
| Description | Tag parts | Time part | Resulting partition key |
|
||||
| :---------------------- | :---------------- | :--------- | :----------------------- |
|
||||
| By day (default) | | `%Y-%m-%d` | 2024-01-01 |
|
||||
| By day (non-default) | | `%d %b %Y` | 01 Jan 2024 |
|
||||
| By week | | `%Y wk:%W` | 2024 wk:01 |
|
||||
| By month | | `%Y-%m` | 2024-01 |
|
||||
| Single tag, by day | `line` | `%F` | A \| 2024-01-01 |
|
||||
| Single tag, by week | `line` | `%Y wk:%W` | A \| 2024 wk:01 |
|
||||
| Single tag, by month | `line` | `%Y-%m` | A \| 2024-01 |
|
||||
| Multiple tags, by day | `line`, `station` | `%F` | A \| weld1 \| 2024-01-01 |
|
||||
| Multiple tags, by week | `line`, `station` | `%Y wk:%W` | A \| weld1 \| 2024 wk:01 |
|
||||
| Multiple tags, by month | `line`, `station` | `%Y-%m` | A \| weld1 \| 2024-01 |
|
||||
|
||||
##### Partition by tag buckets
|
||||
|
||||
| Description | Tag part | Tag bucket part | Time part | Resulting partition key |
|
||||
| :--------------------------------- | :------- | :-------------- | :--------- | :---------------------- |
|
||||
| Distinct tag, tag buckets, by day | `line` | `station,100` | `%F` | A \| 3 \| 2024-01-01 |
|
||||
| Distinct tag, tag buckets, by week | `line` | `station,500` | `%Y wk:%W` | A \| 303 \| 2024 wk:01 |
|
||||
|
|
|
|||
|
|
@ -8,276 +8,9 @@ menu:
|
|||
influxdb_cloud_dedicated:
|
||||
parent: Manage data partitioning
|
||||
weight: 202
|
||||
source: /shared/v3-distributed-admin-custom-partitions/partition-templates.md
|
||||
---
|
||||
|
||||
Use partition templates to define the patterns used to generate partition keys.
|
||||
A partition key uniquely identifies a partition and is used to name the partition
|
||||
Parquet file in the [Object store](/influxdb/cloud-dedicated/reference/internals/storage-engine/#object-store).
|
||||
|
||||
A partition template consists of 1-8 _template parts_---dimensions to partition data by.
|
||||
There are three types of template parts:
|
||||
|
||||
- **tag**: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
|
||||
to partition by.
|
||||
- **tag bucket**: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
|
||||
and number of "buckets" to group tag values into. Data is partitioned by the
|
||||
tag bucket rather than each distinct tag value.
|
||||
- **time**: A Rust strftime date and time string that specifies the time interval
|
||||
to partition data by. The smallest unit of time included in the time part
|
||||
template is the interval used to partition data.
|
||||
|
||||
{{% note %}}
|
||||
A partition template can include up to 7 total tag and tag bucket parts
|
||||
and only 1 time part.
|
||||
{{% /note %}}
|
||||
|
||||
<!-- TOC -->
|
||||
- [Restrictions](#restrictions)
|
||||
- [Template part size limit](#template-part-size-limit)
|
||||
- [Reserved keywords](#reserved-keywords)
|
||||
- [Reserved Characters](#reserved-characters)
|
||||
- [Tag part templates](#tag-part-templates)
|
||||
- [Tag bucket part templates](#tag-bucket-part-templates)
|
||||
- [Time part templates](#time-part-templates)
|
||||
- [Date specifiers](#date-specifiers)
|
||||
- [Time specifiers](#time-specifiers)
|
||||
- [Time zone specifiers](#time-zone-specifiers)
|
||||
- [Date and time specifiers](#date-and-time-specifiers)
|
||||
- [Special specifiers](#special-specifiers)
|
||||
<!-- /TOC -->
|
||||
|
||||
## Restrictions
|
||||
|
||||
### Template part size limit
|
||||
|
||||
Each template part is limited to 200 bytes in length.
|
||||
Anything longer will be truncated at 200 bytes and appended with `#`.
|
||||
|
||||
### Partition key size limit
|
||||
|
||||
With the truncation of template parts, the maximum length of a partition key is
|
||||
1,607 bytes (1.57 KiB).
|
||||
|
||||
### Reserved keywords
|
||||
|
||||
The following reserved keywords cannot be used in partition templates:
|
||||
|
||||
- `time`
|
||||
|
||||
### Reserved Characters
|
||||
|
||||
If used in template parts, non-ASCII characters and the following reserved
|
||||
characters must be [percent encoded](https://developer.mozilla.org/en-US/docs/Glossary/Percent-encoding):
|
||||
|
||||
- `|`: Partition key part delimiter
|
||||
- `!`: Null or missing partition key part
|
||||
- `^`: Empty string partition key part
|
||||
- `#`: Key part truncation marker
|
||||
- `%`: Required for unambiguous reversal of percent encoding
|
||||
|
||||
## Tag part templates
|
||||
|
||||
Tag part templates consist of a _tag key_ to partition by.
|
||||
Generated partition keys include the unique _tag value_ specific to each partition.
|
||||
|
||||
## Tag bucket part templates
|
||||
|
||||
Tag bucket part templates consist of a _tag key_ to partition by and the
|
||||
_number of "buckets" to partition tag values into_--for example:
|
||||
|
||||
```
|
||||
customerID,500
|
||||
```
|
||||
|
||||
Values of the `customerID` tag are bucketed into 500 distinct "buckets."
|
||||
Each bucket is identified by the remainder of the tag value hashed into a 32bit
|
||||
integer divided by the specified number of buckets:
|
||||
|
||||
```rust
|
||||
hash(tagValue) % N
|
||||
```
|
||||
|
||||
Generated partition keys include the unique _tag bucket identifier_ specific to
|
||||
each partition.
|
||||
|
||||
**Supported number of tag buckets**: 1-1,000
|
||||
|
||||
{{% note %}}
|
||||
Tag buckets should be used to partition by high cardinality tags or tags with an
|
||||
unknown number of distinct values.
|
||||
{{% /note %}}
|
||||
|
||||
## Time part templates
|
||||
|
||||
Time part templates use [Rust strftime date and time formatting syntax](https://docs.rs/chrono/latest/chrono/format/strftime/index.html)
|
||||
to specify time format in partition keys.
|
||||
The smallest unit of time included in the time part template is the interval
|
||||
used to partition data.
|
||||
|
||||
{{% warn %}}
|
||||
#### Avoid partitioning by less than one day
|
||||
|
||||
We do not recommend using time intervals less than one day to partition data.
|
||||
This can result in [over-partitioned data](/influxdb/cloud-dedicated/admin/custom-partitions/best-practices/#avoid-over-partitioning)
|
||||
and may hurt query performance.
|
||||
{{% /warn %}}
|
||||
|
||||
- [Date specifiers](#date-specifiers)
|
||||
- [Time specifiers](#time-specifiers)
|
||||
- [Time zone specifiers](#time-zone-specifiers)
|
||||
- [Date and time specifiers](#date-and-time-specifiers)
|
||||
- [Special specifiers](#special-specifiers)
|
||||
|
||||
{{% note %}}
|
||||
The following is adapted from the
|
||||
[Rust strftime source code](https://docs.rs/chrono/latest/src/chrono/format/strftime.rs.html).
|
||||
{{% /note %}}
|
||||
|
||||
### Date specifiers
|
||||
|
||||
| Variable | Example | Description |
|
||||
| :------: | :--------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `%Y` | `2001` | The full proleptic Gregorian year, zero-padded to 4 digits. chrono supports years from -262144 to 262143. Note: years before 1 BCE or after 9999 CE, require an initial sign (+/-). |
|
||||
| `%C` | `20` | The proleptic Gregorian year divided by 100, zero-padded to 2 digits. [^1] |
|
||||
| `%y` | `01` | The proleptic Gregorian year modulo 100, zero-padded to 2 digits. [^1] |
|
||||
| `%m` | `07` | Month number (01--12), zero-padded to 2 digits. |
|
||||
| `%b` | `Jul` | Abbreviated month name. Always 3 letters. |
|
||||
| `%B` | `July` | Full month name. Also accepts corresponding abbreviation in parsing. |
|
||||
| `%h` | `Jul` | Same as `%b`. |
|
||||
| `%d` | `08` | Day number (01--31), zero-padded to 2 digits. |
|
||||
| `%e` | ` 8` | Same as `%d` but space-padded. Same as `%_d`. |
|
||||
| `%a` | `Sun` | Abbreviated weekday name. Always 3 letters. |
|
||||
| `%A` | `Sunday` | Full weekday name. Also accepts corresponding abbreviation in parsing. |
|
||||
| `%w` | `0` | Sunday = 0, Monday = 1, ..., Saturday = 6. |
|
||||
| `%u` | `7` | Monday = 1, Tuesday = 2, ..., Sunday = 7. (ISO 8601) |
|
||||
| `%U` | `28` | Week number starting with Sunday (00--53), zero-padded to 2 digits. [^2] |
|
||||
| `%W` | `27` | Same as `%U`, but week 1 starts with the first Monday in that year instead. |
|
||||
| `%G` | `2001` | Same as `%Y` but uses the year number in ISO 8601 week date. [^3] |
|
||||
| `%g` | `01` | Same as `%y` but uses the year number in ISO 8601 week date. [^3] |
|
||||
| `%V` | `27` | Same as `%U` but uses the week number in ISO 8601 week date (01--53). [^3] |
|
||||
| `%j` | `189` | Day of the year (001--366), zero-padded to 3 digits. |
|
||||
| `%D` | `07/08/01` | Month-day-year format. Same as `%m/%d/%y`. |
|
||||
| `%x` | `07/08/01` | Locale's date representation (e.g., 12/31/99). |
|
||||
| `%F` | `2001-07-08` | Year-month-day format (ISO 8601). Same as `%Y-%m-%d`. |
|
||||
| `%v` | ` 8-Jul-2001` | Day-month-year format. Same as `%e-%b-%Y`. |
|
||||
|
||||
### Time specifiers
|
||||
|
||||
| Variable | Example | Description |
|
||||
| :------: | :--------------------------------- | :----------------------------------------------------------------------------------------------------------------------- |
|
||||
| `%H` | `00` | Hour number (00--23), zero-padded to 2 digits. |
|
||||
| `%k` | ` 0` | Same as `%H` but space-padded. Same as `%_H`. |
|
||||
| `%I` | `12` | Hour number in 12-hour clocks (01--12), zero-padded to 2 digits. |
|
||||
| `%l` | `12` | Same as `%I` but space-padded. Same as `%_I`. |
|
||||
| `%P` | `am` | `am` or `pm` in 12-hour clocks. |
|
||||
| `%p` | `AM` | `AM` or `PM` in 12-hour clocks. |
|
||||
| `%M` | `34` | Minute number (00--59), zero-padded to 2 digits. |
|
||||
| `%S` | `60` | Second number (00--60), zero-padded to 2 digits. [^4] |
|
||||
| `%f` | `26490000` | Number of nanoseconds since last whole second. [^7] |
|
||||
| `%.f` | `.026490` | Decimal fraction of a second. Consumes the leading dot. [^7] |
|
||||
| `%.3f` | `.026` | Decimal fraction of a second with a fixed length of 3. |
|
||||
| `%.6f` | `.026490` | Decimal fraction of a second with a fixed length of 6. |
|
||||
| `%.9f` | `.026490000` | Decimal fraction of a second with a fixed length of 9. |
|
||||
| `%3f` | `026` | Decimal fraction of a second like `%.3f` but without the leading dot. |
|
||||
| `%6f` | `026490` | Decimal fraction of a second like `%.6f` but without the leading dot. |
|
||||
| `%9f` | `026490000` | Decimal fraction of a second like `%.9f` but without the leading dot. |
|
||||
| `%R` | `00:34` | Hour-minute format. Same as `%H:%M`. |
|
||||
| `%T` | `00:34:60` | Hour-minute-second format. Same as `%H:%M:%S`. |
|
||||
| `%X` | `00:34:60` | Locale's time representation (e.g., 23:13:48). |
|
||||
| `%r` | `12:34:60 AM` | Locale's 12 hour clock time. (e.g., 11:11:04 PM). Falls back to `%X` if the locale does not have a 12 hour clock format. |
|
||||
|
||||
### Time zone specifiers
|
||||
|
||||
| Variable | Example | Description |
|
||||
| :------: | :--------------------------------- | :----------------------------------------------------------------------------------------------------------------- |
|
||||
| `%Z` | `ACST` | Local time zone name. Skips all non-whitespace characters during parsing. Identical to `%:z` when formatting. [^8] |
|
||||
| `%z` | `+0930` | Offset from the local time to UTC (with UTC being `+0000`). |
|
||||
| `%:z` | `+09:30` | Same as `%z` but with a colon. |
|
||||
| `%::z` | `+09:30:00` | Offset from the local time to UTC with seconds. |
|
||||
| `%:::z` | `+09` | Offset from the local time to UTC without minutes. |
|
||||
| `%#z` | `+09` | *Parsing only:* Same as `%z` but allows minutes to be missing or present. |
|
||||
|
||||
### Date and time specifiers
|
||||
|
||||
| Variable | Example | Description |
|
||||
| :------: | :--------------------------------- | :--------------------------------------------------------------------- |
|
||||
| `%c` | `Sun Jul 8 00:34:60 2001` | Locale's date and time (e.g., Thu Mar 3 23:05:25 2005). |
|
||||
| `%+` | `2001-07-08T00:34:60.026490+09:30` | ISO 8601 / RFC 3339 date & time format. [^5] |
|
||||
| `%s` | `994518299` | UNIX timestamp, the number of seconds since 1970-01-01 00:00 UTC. [^6] |
|
||||
|
||||
### Special specifiers
|
||||
|
||||
| Variable | Example | Description |
|
||||
| :------: | :------ | :---------------------- |
|
||||
| `%t` | | Literal tab (`\t`). |
|
||||
| `%n` | | Literal newline (`\n`). |
|
||||
| `%%` | | Literal percent sign. |
|
||||
|
||||
It is possible to override the default padding behavior of numeric specifiers `%?`.
|
||||
This is not allowed for other specifiers and results in the `BAD_FORMAT` error.
|
||||
|
||||
Modifier | Description
|
||||
-------- | -----------
|
||||
`%-?` | Suppresses any padding including spaces and zeroes. (e.g. `%j` = `012`, `%-j` = `12`)
|
||||
`%_?` | Uses spaces as a padding. (e.g. `%j` = `012`, `%_j` = ` 12`)
|
||||
`%0?` | Uses zeroes as a padding. (e.g. `%e` = ` 9`, `%0e` = `09`)
|
||||
|
||||
Notes:
|
||||
|
||||
[^1]: `%C`, `%y`:
|
||||
This is floor division, so 100 BCE (year number -99) will print `-1` and `99` respectively.
|
||||
[^2]: `%U`:
|
||||
Week 1 starts with the first Sunday in that year.
|
||||
It is possible to have week 0 for days before the first Sunday.
|
||||
|
||||
[^3]: `%G`, `%g`, `%V`:
|
||||
Week 1 is the first week with at least 4 days in that year.
|
||||
Week 0 does not exist, so this should be used with `%G` or `%g`.
|
||||
|
||||
[^4]: `%S`:
|
||||
It accounts for leap seconds, so `60` is possible.
|
||||
|
||||
[^5]: `%+`: Same as `%Y-%m-%dT%H:%M:%S%.f%:z`, i.e. 0, 3, 6 or 9 fractional
|
||||
digits for seconds and colons in the time zone offset.
|
||||
<br>
|
||||
<br>
|
||||
This format also supports having a `Z` or `UTC` in place of `%:z`. They
|
||||
are equivalent to `+00:00`.
|
||||
<br>
|
||||
<br>
|
||||
Note that all `T`, `Z`, and `UTC` are parsed case-insensitively.
|
||||
<br>
|
||||
<br>
|
||||
The typical `strftime` implementations have different (and locale-dependent)
|
||||
formats for this specifier. While Chrono's format for `%+` is far more
|
||||
stable, it is best to avoid this specifier if you want to control the exact
|
||||
output.
|
||||
|
||||
[^6]: `%s`:
|
||||
This is not padded and can be negative.
|
||||
For the purpose of Chrono, it only accounts for non-leap seconds
|
||||
so it slightly differs from ISO C `strftime` behavior.
|
||||
|
||||
[^7]: `%f`, `%.f`:
|
||||
<br>
|
||||
`%f` and `%.f` are notably different formatting specifiers.<br>
|
||||
`%f` counts the number of nanoseconds since the last whole second, while `%.f` is a fraction of a
|
||||
second.<br>
|
||||
Example: 7μs is formatted as `7000` with `%f`, and formatted as `.000007` with `%.f`.
|
||||
|
||||
[^8]: `%Z`:
|
||||
Since `chrono` is not aware of timezones beyond their offsets, this specifier
|
||||
**only prints the offset** when used for formatting. The timezone abbreviation
|
||||
will NOT be printed. See [this issue](https://github.com/chronotope/chrono/issues/960)
|
||||
for more information.
|
||||
<br>
|
||||
<br>
|
||||
Offset will not be populated from the parsed data, nor will it be validated.
|
||||
Timezone is completely ignored. Similar to the glibc `strptime` treatment of
|
||||
this format code.
|
||||
<br>
|
||||
<br>
|
||||
It is not possible to reliably convert from an abbreviation to an offset,
|
||||
for example CDT can mean either Central Daylight Time (North America) or
|
||||
China Daylight Time.
|
||||
*/
|
||||
<!--
|
||||
The content of this page is at /content/shared/v3-distributed-admin-custom-partitions/_partition-templates.md
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
title: View partition information
|
||||
description: >
|
||||
Query partition information from InfluxDB v3 system tables to view partition
|
||||
templates and verify partitions are working as intended.
|
||||
menu:
|
||||
influxdb_cloud_dedicated:
|
||||
name: View partitions
|
||||
parent: Manage data partitioning
|
||||
weight: 202
|
||||
list_code_example: |
|
||||
```sql
|
||||
SELECT * FROM system.partitions WHERE table_name = 'example-table'
|
||||
```
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/admin/query-system-data/
|
||||
source: /shared/v3-distributed-admin-custom-partitions/view-partitions.md
|
||||
---
|
||||
|
||||
<!--
|
||||
The content of this page is at /content/shared/v3-distributed-admin-custom-partitions/view-partitions.md
|
||||
-->
|
||||
|
|
@ -128,7 +128,7 @@ influxctl database create \
|
|||
Replace the following in your command:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/cloud-dedicated/admin/databases/)
|
||||
- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag]((/influxdb/cloud-dedicated/reference/glossary/#tag)) keys from your data
|
||||
- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb/cloud-dedicated/reference/glossary/#tag) keys from your data
|
||||
|
||||
## Database attributes
|
||||
|
||||
|
|
@ -194,14 +194,6 @@ flags to define partition template parts used to generate partition keys for the
|
|||
|
||||
For more information, see [Manage data partitioning](/influxdb/cloud-dedicated/admin/custom-partitions/).
|
||||
|
||||
{{% note %}}
|
||||
|
||||
#### Partition templates can only be applied on create
|
||||
|
||||
You can only apply a partition template when creating a database.
|
||||
You can't update a partition template on an existing database.
|
||||
{{% /note %}}
|
||||
|
||||
<!-------------------------------- END INFLUXCTL ------------------------------>
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
|
@ -235,7 +227,7 @@ _This example uses [cURL](https://curl.se/) to send a Management HTTP API reques
|
|||
to use in the partition template. Limit is 7 total tags or tag buckets.
|
||||
- _Optional_: [InfluxDB tag buckets](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates)
|
||||
to use in the partition template. Limit is 7 total tags or tag buckets.
|
||||
- _Optional_: A [Rust strftime date and time string](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
|
||||
- _Optional_: A supported [Rust strftime date and time string](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
|
||||
that specifies the time format in the partition template and determines
|
||||
the time interval to partition by. Default is `%Y-%m-%d`.
|
||||
- Database name _(see [Database naming restrictions](#database-naming-restrictions))_.
|
||||
|
|
@ -302,7 +294,7 @@ Replace the following in your request:
|
|||
- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the ID of the {{% product-name %}} [cluster](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for
|
||||
- {{% code-placeholder-key %}}`MANAGEMENT TOKEN`{{% /code-placeholder-key %}}: a [management token](/influxdb/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/cloud-dedicated/admin/databases/)
|
||||
- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag]((/influxdb/cloud-dedicated/reference/glossary/#tag)) keys from your data
|
||||
- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb/cloud-dedicated/reference/glossary/#tag) keys from your data
|
||||
|
||||
## Database attributes
|
||||
|
||||
|
|
@ -341,21 +333,23 @@ format in the InfluxDB v3 storage engine. By default, data is partitioned by day
|
|||
but, depending on your schema and workload, customizing the partitioning
|
||||
strategy can improve query performance.
|
||||
|
||||
Use the [`partitionTemplate`](/influxdb/cloud-dedicated/api/management/#operation/CreateClusterDatabase) property to define an array of partition template parts used to generate partition keys for the database.
|
||||
Use the [`partitionTemplate`](/influxdb/cloud-dedicated/api/management/#operation/CreateClusterDatabase)
|
||||
property to define an array of partition template parts used to generate
|
||||
partition keys for the database.
|
||||
|
||||
For more information, see [Manage data partitioning](/influxdb/cloud-dedicated/admin/custom-partitions/).
|
||||
|
||||
{{% note %}}
|
||||
<!------------------------------- END cURL ------------------------------------>
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
{{% warn %}}
|
||||
|
||||
#### Partition templates can only be applied on create
|
||||
|
||||
You can only apply a partition template when creating a database.
|
||||
You can't update a partition template on an existing database.
|
||||
{{% /note %}}
|
||||
|
||||
<!------------------------------- END cURL ------------------------------------>
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
{{% /warn %}}
|
||||
|
||||
### Database naming restrictions
|
||||
|
||||
|
|
|
|||
|
|
@ -10,9 +10,9 @@ menu:
|
|||
weight: 104
|
||||
---
|
||||
|
||||
InfluxData provides a Grafana operational dashboard that provide observability
|
||||
into the your {{< product-name >}} cluster. Use the operational dashboard to
|
||||
monitor your cluster.
|
||||
InfluxData provides a Grafana operational dashboard that provides observability
|
||||
into your {{< product-name >}} cluster.
|
||||
Use the operational dashboard to monitor your cluster.
|
||||
|
||||
{{% note %}}
|
||||
#### Not available for all clusters
|
||||
|
|
@ -40,7 +40,7 @@ If you do not have login credentials, [contact InfluxData support](https://suppo
|
|||
|
||||
## Dashboard sections and cells
|
||||
|
||||
The dashboard is divided into the following sections that visualize metrics
|
||||
The dashboard contains the following sections that visualize metrics
|
||||
related to the health of components in your {{< product-name >}} cluster:
|
||||
|
||||
- [Query Tier Cpu/Mem](#query-tier-cpumem)
|
||||
|
|
@ -156,7 +156,7 @@ to the [Object store](/influxdb/cloud-dedicated/reference/internals/storage-engi
|
|||
|
||||
#### CPU Utilization Ingesters (k8s)
|
||||
|
||||
CPU Utilization of Ingester pods as reported by the Kubernetes container usage.
|
||||
CPU utilization of Ingester pods as reported by the Kubernetes container usage.
|
||||
Usage is reported by the number of CPU cores used by pods, including
|
||||
fractional cores.
|
||||
The CPU limit is represented by the top line in the visualization.
|
||||
|
|
@ -284,7 +284,7 @@ _These do not represent the most recent logs._
|
|||
The **Compaction Tier Cpu/Mem** section displays the CPU and memory usage of
|
||||
Compactor pods as reported by Kubernetes.
|
||||
[Compactors](/influxdb/cloud-dedicated/reference/internals/storage-engine/#compactor)
|
||||
process and compress parquet files in the
|
||||
process and compress Parquet files in the
|
||||
[Object store](/influxdb/cloud-dedicated/reference/internals/storage-engine/#object-store)
|
||||
to continually optimize storage.
|
||||
|
||||
|
|
@ -330,10 +330,10 @@ following levels:
|
|||
|
||||
Parquet files store data partitioned by time and optionally tags
|
||||
_(see [Manage data partition](https://docs.influxdata.com/influxdb/cloud-dedicated/admin/custom-partitions/))_.
|
||||
After four L0 files accumulate for a partition, they are are eligible for compaction.
|
||||
After four L0 files accumulate for a partition, they're eligible for compaction.
|
||||
If the compactor is keeping up with the incoming write load, all compaction
|
||||
events will have exactly four files. If the number of L0 files compacted begins to
|
||||
to increase, it indicates the compactor is not keeping up.
|
||||
events have exactly four files.
|
||||
An increase in the number of L0 files compacted indicates the compactor isn't keeping up.
|
||||
|
||||
This histogram helps to determine if the Compactor is starting compactions as
|
||||
soon as it can.
|
||||
|
|
@ -367,7 +367,7 @@ that the Catalog may be overloaded or unresponsive.
|
|||
|
||||
#### Catalog Op Latency (P90)
|
||||
|
||||
The 90th percentile (P90) of query latency against the catalog service per operation.
|
||||
The 90th percentile (P90) query latency for each operation against the catalog service.
|
||||
A high P90 value indicates that the Catalog may be overloaded.
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
---
|
||||
---
|
||||
title: Query system data
|
||||
description: >
|
||||
Query system tables in your InfluxDB Cloud Dedicated cluster to see data related
|
||||
|
|
@ -10,28 +10,42 @@ menu:
|
|||
weight: 105
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/reference/cli/influxctl/query/
|
||||
---
|
||||
- /influxdb/cloud-dedicated/reference/internals/system-tables/
|
||||
---
|
||||
|
||||
{{< product-name >}} stores data related to queries, tables, partitions, and
|
||||
compaction in system tables in your cluster.
|
||||
Query data in your cluster's system tables for information about your cluster.
|
||||
compaction in _system tables_ within your cluster.
|
||||
System tables contain time series data used by and generated from the
|
||||
{{< product-name >}} internal monitoring system.
|
||||
You can query the cluster system tables for information about your cluster.
|
||||
|
||||
- [Query system tables](#query-system-tables)
|
||||
- [Optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster)
|
||||
- [System tables](#system-tables)
|
||||
- [Understanding system table data distribution](#understanding-system-table-data-distribution)
|
||||
- [system.queries](#systemqueries)
|
||||
- [system.tables](#systemtables)
|
||||
- [system.partitions](#systempartitions)
|
||||
- [system.compactor](#systemcompactor)
|
||||
- [System query examples](#system-query-examples)
|
||||
- [Query logs](#query-logs)
|
||||
- [Partitions](#partitions)
|
||||
- [Storage usage](#storage-usage)
|
||||
- [Compaction](#compaction)
|
||||
|
||||
{{% warn %}}
|
||||
#### May impact overall cluster performance
|
||||
#### May impact cluster performance
|
||||
|
||||
Querying InfluxDB v3 system tables may impact the overall write and query
|
||||
Querying InfluxDB v3 system tables may impact write and query
|
||||
performance of your {{< product-name omit=" Clustered" >}} cluster.
|
||||
Use filters to [optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
|
||||
|
||||
<!--------------- UPDATE THE DATE BELOW AS EXAMPLES ARE UPDATED --------------->
|
||||
|
||||
#### System tables are subject to change
|
||||
|
||||
System tables are not part of InfluxDB's stable API and may change with new releases.
|
||||
The provided schema information and query examples are valid as of **April 11, 2024**.
|
||||
The provided schema information and query examples are valid as of **September 18, 2024**.
|
||||
If you detect a schema change or a non-functioning query example, please
|
||||
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
|
||||
|
||||
|
|
@ -40,16 +54,13 @@ If you detect a schema change or a non-functioning query example, please
|
|||
|
||||
## Query system tables
|
||||
|
||||
{{% warn %}}
|
||||
_Querying system tables [may impact overall cluster performance](#may-impact-overall-cluster-performance)._
|
||||
{{% /warn %}}
|
||||
|
||||
{{% note %}}
|
||||
Querying system tables with `influxctl` requires **`influxctl` v2.8.0 or newer**.
|
||||
{{% /note %}}
|
||||
|
||||
Use the [`influxctl query` command](/influxdb/cloud-dedicated/reference/cli/influxctl/query/)
|
||||
and SQL to query system tables. Provide the following:
|
||||
and SQL to query system tables.
|
||||
Provide the following:
|
||||
|
||||
- **Enable system tables** with the `--enable-system-tables` command flag.
|
||||
- **Database token**: A [database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens)
|
||||
|
|
@ -61,6 +72,7 @@ and SQL to query system tables. Provide the following:
|
|||
[`influxctl` connection profile](/influxdb/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles)
|
||||
or the `--database` command flag.
|
||||
- **SQL query**: The SQL query to execute.
|
||||
|
||||
Pass the query in one of the following ways:
|
||||
|
||||
- a string on the command line
|
||||
|
|
@ -119,12 +131,152 @@ Replace the following:
|
|||
When prompted, enter `y` to acknowledge the potential impact querying system
|
||||
tables may have on your cluster.
|
||||
|
||||
### Optimize queries to reduce impact to your cluster
|
||||
|
||||
Querying InfluxDB v3 system tables may impact the performance of your
|
||||
{{< product-name omit=" Clustered" >}} cluster.
|
||||
As you write data to a cluster, the number of partitions and Parquet files
|
||||
can increase to a point that impacts system table performance.
|
||||
Queries that took milliseconds with fewer files and partitions might take 10
|
||||
seconds or longer as files and partitions increase.
|
||||
|
||||
Use the following filters to optimize your system table queries and reduce the impact on your
|
||||
cluster's performance.
|
||||
|
||||
In your queries, replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the table to retrieve partitions for
|
||||
- {{% code-placeholder-key %}}`PARTITION_ID`{{% /code-placeholder-key %}}: a [partition ID](#retrieve-a-partition-id) (int64)
|
||||
- {{% code-placeholder-key %}}`PARTITION_KEY`{{% /code-placeholder-key %}}: a [partition key](/influxdb/cloud-dedicated/admin/custom-partitions/#partition-keys)
|
||||
derived from the table's partition template.
|
||||
The default format is `%Y-%m-%d` (for example, `2024-01-01`).
|
||||
|
||||
##### Filter by table name
|
||||
|
||||
When querying the `system.tables`, `system.partitions`, or `system.compactor` tables, use the
|
||||
`WHERE` clause to filter by `table_name`.
|
||||
|
||||
{{% code-placeholders "TABLE_NAME" %}}
|
||||
```sql
|
||||
SELECT * FROM system.partitions WHERE table_name = 'TABLE_NAME'
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
##### Filter by partition key
|
||||
|
||||
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
|
||||
filter by `partition_key`.
|
||||
|
||||
{{% code-placeholders "PARTITION_KEY" %}}
|
||||
```sql
|
||||
SELECT * FROM system.partitions WHERE partition_key = 'PARTITION_KEY'
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
To further improve performance, use `AND` to pair `partition_key` with `table_name`--for example:
|
||||
|
||||
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
|
||||
```sql
|
||||
SELECT *
|
||||
FROM system.partitions
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
AND partition_key = 'PARTITION_KEY';
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
##### Filter by partition ID
|
||||
|
||||
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
|
||||
filter by `partition_id`.
|
||||
|
||||
{{% code-placeholders "PARTITION_ID" %}}
|
||||
```sql
|
||||
SELECT * FROM system.partitions WHERE partition_id = PARTITION_ID
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
For the most optimized approach, use `AND` to pair `partition_id` with `table_name`--for example:
|
||||
|
||||
{{% code-placeholders "TABLE_NAME|PARTITION_ID" %}}
|
||||
```sql
|
||||
SELECT *
|
||||
FROM system.partitions
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
AND partition_id = PARTITION_ID;
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
Although you don't need to pair `partition_id` with `table_name` (because a partition ID is unique within a cluster),
|
||||
it's the most optimized approach, _especially when you have many tables in a database_.
|
||||
|
||||
###### Retrieve a partition ID
|
||||
|
||||
To retrieve a partition ID, query `system.partitions` for a `table_name` and `partition_key` pair--for example:
|
||||
|
||||
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
|
||||
```sql
|
||||
SELECT
|
||||
table_name,
|
||||
partition_key,
|
||||
partition_id
|
||||
FROM system.partitions
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
AND partition_key = 'PARTITION_KEY';
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
The result contains the `partition_id`:
|
||||
|
||||
| table_name | partition_key | partition_id |
|
||||
| :--------- | :---------------- | -----------: |
|
||||
| weather | 43 \| 2020-05-27 | 1362 |
|
||||
|
||||
##### Combine filters for performance improvement
|
||||
|
||||
Use the `AND`, `OR`, or `IN` keywords to combine filters in your query.
|
||||
|
||||
- **Use `OR` or `IN` conditions when filtering for different values in the same column**--for example:
|
||||
|
||||
```sql
|
||||
WHERE partition_id = 1 OR partition_id = 2
|
||||
```
|
||||
|
||||
Use `IN` to make multiple `OR` conditions more readable--for example:
|
||||
|
||||
```sql
|
||||
WHERE table_name IN ('foo', 'bar', 'baz')
|
||||
```
|
||||
|
||||
- **Avoid mixing different columns in `OR` conditions**, as this won't improve performance--for example:
|
||||
|
||||
```sql
|
||||
WHERE table_name = 'foo' OR partition_id = 2 -- This will not improve performance
|
||||
```
|
||||
|
||||
## System tables
|
||||
|
||||
{{% warn %}}
|
||||
_System tables are [subject to change](#system-tables-are-subject-to-change)._
|
||||
{{% /warn %}}
|
||||
|
||||
### Understanding system table data distribution
|
||||
|
||||
Data in `system.tables`, `system.partitions`, and `system.compactor` includes
|
||||
data for all [InfluxDB Queriers](/influxdb/cloud-dedicated/reference/internals/storage-engine/#querier) in your cluster.
|
||||
The data comes from the catalog, and because all the queriers share one catalog,
|
||||
the results from these three tables derive from the same source data,
|
||||
regardless of which querier you connect to.
|
||||
|
||||
However, the `system.queries` table is different--data is local to each Querier.
|
||||
`system.queries` contains a non-persisted log of queries run against the current
|
||||
querier to which your query is routed.
|
||||
The query log is specific to the current Querier and isn't shared across
|
||||
queriers in your cluster.
|
||||
Logs are scoped to the specified database.
|
||||
|
||||
- [system.queries](#systemqueries)
|
||||
- [system.tables](#systemtables)
|
||||
- [system.partitions](#systempartitions)
|
||||
|
|
@ -132,12 +284,18 @@ _System tables are [subject to change](#system-tables-are-subject-to-change)._
|
|||
|
||||
### system.queries
|
||||
|
||||
The `system.queries` table contains an unpersisted log of queries run against
|
||||
the current [InfluxDB Querier](/influxdb/cloud-dedicated/reference/internals/storage-engine/#querier)
|
||||
to which your query is routed.
|
||||
The query log is specific to the current Querier and is not shared across Queriers
|
||||
in your cluster.
|
||||
Logs are scoped to the specified database.
|
||||
The `system.queries` table stores log entries for queries executed for the provided namespace (database) on the node that is _currently handling queries_.
|
||||
`system.queries` reflects a process-local, in-memory, namespace-scoped query log.
|
||||
|
||||
While this table may be useful for debugging and monitoring queries, keep the following in mind:
|
||||
|
||||
- Records stored in `system.queries` are transient and volatile
|
||||
- InfluxDB deletes `system.queries` records during pod restarts.
|
||||
- Queries for one namespace can evict records from another namespace.
|
||||
- Data reflects the state of a specific pod answering queries for the namespace.
|
||||
- Data isn't shared across queriers in your cluster.
|
||||
- A query for records in `system.queries` can return different results
|
||||
depending on the pod the request was routed to.
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "View `system.queries` schema" %}}
|
||||
|
|
@ -146,9 +304,9 @@ The `system.queries` table contains the following columns:
|
|||
|
||||
- id
|
||||
- phase
|
||||
- issue_time
|
||||
- query_type
|
||||
- query_text
|
||||
- **issue_time**: timestamp when the query was issued
|
||||
- **query_type**: type (syntax: `sql`, `flightsql`, or `influxql`) of the query
|
||||
- **query_text**: query statement text
|
||||
- partitions
|
||||
- parquet_files
|
||||
- plan_duration
|
||||
|
|
@ -157,14 +315,20 @@ The `system.queries` table contains the following columns:
|
|||
- end2end_duration
|
||||
- compute_duration
|
||||
- max_memory
|
||||
- success
|
||||
- **success**: execution status (boolean) of the query
|
||||
- running
|
||||
- cancelled
|
||||
- trace_id
|
||||
- **trace_id**: trace ID for debugging and monitoring events
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
{{% note %}}
|
||||
_When listing measurements (tables) available within a namespace,
|
||||
some clients and query tools may include the `queries` table in the list of
|
||||
namespace tables._
|
||||
{{% /note %}}
|
||||
|
||||
### system.tables
|
||||
|
||||
The `system.tables` table contains information about tables in the specified database.
|
||||
|
|
@ -202,7 +366,7 @@ The `system.partitions` table contains the following columns:
|
|||
|
||||
### system.compactor
|
||||
|
||||
The `system.compaction` table contains information about compacted partition Parquet
|
||||
The `system.compactor` table contains information about compacted partition Parquet
|
||||
files associated with the specified database.
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
|
|
@ -222,27 +386,36 @@ The `system.compactor` table contains the following columns:
|
|||
- skipped_reason
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
## System query examples
|
||||
|
||||
{{% warn %}}
|
||||
#### May impact cluster performance
|
||||
|
||||
Querying InfluxDB v3 system tables may impact write and query
|
||||
performance of your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
The examples in this section include `WHERE` filters to [optimize queries and reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
|
||||
{{% /warn %}}
|
||||
|
||||
- [Query logs](#query-logs)
|
||||
- [View all stored query logs](#view-all-stored-query-logs)
|
||||
- [View query logs for queries with end-to-end durations above a threshold](#view-query-logs-for-queries-with-end-to-end-durations-above-a-threshold)
|
||||
- [View query logs for a specific query within a time interval](#view-query-logs-for-a-specific-query-within-a-time-interval)
|
||||
- [Partitions](#partitions)
|
||||
- [View partition templates of all tables](#view-partition-templates-of-all-tables)
|
||||
- [View the partition template of a specific table](#view-the-partition-template-of-a-specific-table)
|
||||
- [View all partitions for a table](#view-all-partitions-for-a-table)
|
||||
- [View the number of partitions per table](#view-the-number-of-partitions-per-table)
|
||||
- [View the number of partitions for a specific table](#view-the-number-of-partitions-for-a-specific-table)
|
||||
- [Storage usage](#storage-usage)
|
||||
- [View the size of tables in megabytes](#view-the-size-of-tables-in-megabytes)
|
||||
- [View the size of a specific table in megabytes](#view-the-size-of-a-specific-table-in-megabytes)
|
||||
- [View the total size of all compacted partitions per table in bytes](#view-the-total-size-of-all-compacted-partitions-per-table-in-bytes)
|
||||
- [View the total size of all compacted partitions in bytes](#view-the-total-size-of-all-compacted-partitions-in-bytes)
|
||||
- [View the size in megabytes of a specific table](#view-the-size-in-megabytes-of-a-specific-table)
|
||||
- [View the size in megabytes per table](#view-the-size-in-megabytes-per-table)
|
||||
- [View the total size in bytes of compacted partitions per table](#view-the-total-size-in-bytes-of-compacted-partitions-per-table)
|
||||
- [View the total size in bytes of compacted partitions for a specific table](#view-the-total-size-in-bytes-of-compacted-partitions-for-a-specific-table)
|
||||
- [Compaction](#compaction)
|
||||
- [View overall compaction totals for each table](#view-overall-compaction-totals-for-each-table)
|
||||
- [View overall compaction totals for a specific table](#view-overall-compaction-totals-for-a-specific-table)
|
||||
- [View compaction totals for each table](#view-compaction-totals-for-each-table)
|
||||
- [View compaction totals for a specific table](#view-compaction-totals-for-a-specific-table)
|
||||
|
||||
In the examples below, replace {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}
|
||||
with the name of the table you want to query information about.
|
||||
|
|
@ -265,29 +438,75 @@ The following returns query logs for queries with an end-to-end duration greater
|
|||
than 50 milliseconds.
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.queries WHERE end2end_duration::BIGINT > (50 * 1000000)
|
||||
SELECT *
|
||||
FROM
|
||||
system.queries
|
||||
WHERE
|
||||
end2end_duration::BIGINT > (50 * 1000000)
|
||||
```
|
||||
|
||||
---
|
||||
### View query logs for a specific query within a time interval
|
||||
|
||||
{{< code-tabs >}}
|
||||
{{% tabs %}}
|
||||
[SQL](#)
|
||||
[Python](#)
|
||||
{{% /tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-----------------------------------BEGIN SQL------------------------------>
|
||||
```sql
|
||||
SELECT *
|
||||
FROM system.queries
|
||||
WHERE issue_time >= now() - INTERVAL '1 day'
|
||||
AND query_text LIKE '%select * from home%'
|
||||
```
|
||||
<!-----------------------------------END SQL------------------------------>
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
<!-----------------------------------BEGIN PYTHON------------------------------>
|
||||
```python
|
||||
from influxdb_client_3 import InfluxDBClient3
|
||||
client = InfluxDBClient3(token = DATABASE_TOKEN,
|
||||
host = HOSTNAME,
|
||||
org = '',
|
||||
database=DATABASE_NAME)
|
||||
client.query('select * from home')
|
||||
reader = client.query('''
|
||||
SELECT *
|
||||
FROM system.queries
|
||||
WHERE issue_time >= now() - INTERVAL '1 day'
|
||||
AND query_text LIKE '%select * from home%'
|
||||
''',
|
||||
language='sql',
|
||||
headers=[(b"iox-debug", b"true")],
|
||||
mode="reader")
|
||||
```
|
||||
<!-----------------------------------END PYTHON------------------------------>
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs >}}
|
||||
|
||||
---
|
||||
|
||||
### Partitions
|
||||
|
||||
#### View partition templates of all tables
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.tables
|
||||
```
|
||||
|
||||
#### View the partition template of a specific table
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'
|
||||
SELECT *
|
||||
FROM
|
||||
system.tables
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
```
|
||||
|
||||
#### View all partitions for a table
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.partitions WHERE table_name = 'TABLE_NAME'
|
||||
SELECT *
|
||||
FROM
|
||||
system.partitions
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
```
|
||||
|
||||
#### View the number of partitions per table
|
||||
|
|
@ -298,6 +517,8 @@ SELECT
|
|||
COUNT(*) AS partition_count
|
||||
FROM
|
||||
system.partitions
|
||||
WHERE
|
||||
table_name IN ('foo', 'bar', 'baz')
|
||||
GROUP BY
|
||||
table_name
|
||||
```
|
||||
|
|
@ -313,23 +534,11 @@ WHERE
|
|||
table_name = 'TABLE_NAME'
|
||||
```
|
||||
|
||||
---
|
||||
---
|
||||
|
||||
### Storage usage
|
||||
|
||||
#### View the size of tables in megabytes
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
table_name,
|
||||
SUM(total_size_mb) AS total_size_mb
|
||||
FROM
|
||||
system.partitions
|
||||
GROUP BY
|
||||
table_name
|
||||
```
|
||||
|
||||
#### View the size of a specific table in megabytes
|
||||
#### View the size in megabytes of a specific table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
|
@ -340,7 +549,21 @@ WHERE
|
|||
table_name = 'TABLE_NAME'
|
||||
```
|
||||
|
||||
#### View the total size of all compacted partitions per table in bytes
|
||||
#### View the size in megabytes per table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
table_name,
|
||||
SUM(total_size_mb) AS total_size_mb
|
||||
FROM
|
||||
system.partitions
|
||||
WHERE
|
||||
table_name IN ('foo', 'bar', 'baz')
|
||||
GROUP BY
|
||||
table_name
|
||||
```
|
||||
|
||||
#### View the total size in bytes of compacted partitions per table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
|
@ -348,24 +571,28 @@ SELECT
|
|||
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
|
||||
FROM
|
||||
system.compactor
|
||||
WHERE
|
||||
table_name IN ('foo', 'bar', 'baz')
|
||||
GROUP BY
|
||||
table_name
|
||||
```
|
||||
|
||||
#### View the total size of all compacted partitions in bytes
|
||||
#### View the total size in bytes of compacted partitions for a specific table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
|
||||
FROM
|
||||
system.compactor
|
||||
WHERE
|
||||
table_name = 'TABLE_NAME'
|
||||
```
|
||||
|
||||
---
|
||||
---
|
||||
|
||||
### Compaction
|
||||
|
||||
#### View overall compaction totals for each table
|
||||
#### View compaction totals for each table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
|
@ -378,11 +605,13 @@ SELECT
|
|||
SUM(total_l2_bytes) AS total_l2_bytes
|
||||
FROM
|
||||
system.compactor
|
||||
WHERE
|
||||
table_name IN ('foo', 'bar', 'baz')
|
||||
GROUP BY
|
||||
table_name
|
||||
```
|
||||
|
||||
#### View overall compaction totals for a specific table
|
||||
#### View compaction totals for a specific table
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
|
|
|||
|
|
@ -14,14 +14,14 @@ of [Auth0](https://auth0.com) and your identity provider of choice.
|
|||
Use SSO to provide users seamless access to your {{< product-name >}} cluster
|
||||
with an existing set of credentials.
|
||||
|
||||
{{% cloud %}}
|
||||
#### Contact InfluxData sales to enable SSO
|
||||
|
||||
SSO is a paid upgrade to your {{< product-name >}} cluster.
|
||||
To begin the process of enabling SSO, contact InfluxData Sales:
|
||||
|
||||
<a class="btn" href="https://www.influxdata.com/contact-sales/">Contact InfluxData Sales</a>
|
||||
{{% /cloud %}}
|
||||
> [!Important]
|
||||
>
|
||||
> #### Contact InfluxData sales to enable SSO
|
||||
>
|
||||
> SSO is a paid upgrade to your {{< product-name >}} cluster.
|
||||
> To begin the process of enabling SSO, contact InfluxData Sales:
|
||||
>
|
||||
> <a class="btn" href="https://www.influxdata.com/contact-sales/">Contact InfluxData Sales</a>
|
||||
|
||||
- [SSO authorization flow](#sso-authorization-flow)
|
||||
- [Set up your identity provider](#set-up-your-identity-provider)
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ to a table, you must manually create the table before you write any data to it.
|
|||
to use in the partition template
|
||||
- _Optional_: [InfluxDB tag buckets](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates)
|
||||
to use in the partition template
|
||||
- _Optional_: A [Rust strftime date and time string](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
|
||||
- _Optional_: A supported [Rust strftime date and time string](/influxdb/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
|
||||
that specifies the time format in the partition template and determines
|
||||
the time interval to partition by _(default is `%Y-%m-%d`)_
|
||||
- The name of the database to create the table in
|
||||
|
|
@ -71,9 +71,9 @@ If no template flags are provided, the table uses the partition template of the
|
|||
target database.
|
||||
For more information, see [Manage data partitioning](/influxdb/cloud-dedicated/admin/custom-partitions/).
|
||||
|
||||
{{% note %}}
|
||||
{{% warn %}}
|
||||
#### Partition templates can only be applied on create
|
||||
|
||||
You can only apply a partition template when creating a table.
|
||||
There is no way to update a partition template on an existing table.
|
||||
{{% /note %}}
|
||||
{{% /warn %}}
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ related:
|
|||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
or the [Management HTTP API](influxdb/cloud-dedicated/api/management/) to create a [database token](/influxdb/cloud-dedicated/admin/tokens/database/) with permissions for reading and writing data in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
or the [Management HTTP API](/influxdb/cloud-dedicated/api/management/) to create a [database token](/influxdb/cloud-dedicated/admin/tokens/database/) with permissions for reading and writing data in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
|
|
@ -435,4 +435,4 @@ curl \
|
|||
|
||||
{{% /code-placeholders %}}
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ related:
|
|||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
or the [Management HTTP API](influxdb/cloud-dedicated/api/management/)
|
||||
or the [Management HTTP API](/influxdb/cloud-dedicated/api/management/)
|
||||
to delete a database token from your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ related:
|
|||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
or the [Management HTTP API](influxdb/cloud-dedicated/api/management/)
|
||||
or the [Management HTTP API](/influxdb/cloud-dedicated/api/management/)
|
||||
to list database tokens in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
[List database tokens](#list-database-tokens)
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ related:
|
|||
---
|
||||
|
||||
Use the [`influxctl` CLI](/influxdb/cloud-dedicated/reference/cli/influxctl/)
|
||||
or the [Management HTTP API](influxdb/cloud-dedicated/api/management/)
|
||||
or the [Management HTTP API](/influxdb/cloud-dedicated/api/management/)
|
||||
to update a database token's permissions in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
|
|
|
|||
|
|
@ -11,6 +11,8 @@ menu:
|
|||
name: Management tokens
|
||||
weight: 101
|
||||
influxdb/cloud-dedicated/tags: [tokens]
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/reference/internals/security/
|
||||
---
|
||||
|
||||
Management tokens grant permission to perform administrative actions such as
|
||||
|
|
@ -21,24 +23,27 @@ managing users, databases, and database tokens in your
|
|||
Management tokens do _not_ grant permissions to write or query time series data
|
||||
in your {{< product-name omit=" Clustered">}} cluster.
|
||||
|
||||
To grant write or query permissions, use management tokens to create [database tokens](/influxdb/cloud-dedicated/admin/tokens/database/).
|
||||
To grant write or query permissions, use management tokens to create
|
||||
[database tokens](/influxdb/cloud-dedicated/admin/tokens/database/).
|
||||
{{% /note %}}
|
||||
|
||||
By default, management tokens are short-lived tokens issued by an OAuth2 identity
|
||||
provider that grant a specific user administrative access to your
|
||||
{{< product-name omit=" Clustered">}} cluster.
|
||||
By default, management tokens are short-lived tokens issued by your identity
|
||||
provider for a [specific client session](/influxdb/cloud-dedicated/reference/internals/security/#management-tokens-in-the-influxctl-cli) (for example, `influxctl`).
|
||||
|
||||
However, for automation purposes, you can manually create management tokens that
|
||||
authenticate directly with your InfluxDB Cluster and do not require human
|
||||
interaction with your identity provider.
|
||||
_Manually created management tokens provide full access to all account resources
|
||||
and aren't affected by [user groups](/influxdb/cloud-dedicated/reference/internals/security/#user-groups)_.
|
||||
|
||||
{{% warn %}}
|
||||
#### For automation use cases only
|
||||
|
||||
The tools outlined below are meant for automation use cases and should not be
|
||||
used to circumvent your identity provider. **Take great care when manually creating
|
||||
and using management tokens**.
|
||||
The tools outlined below are meant for automation use cases and shouldn't be
|
||||
used to circumvent your identity provider or user group permissions.
|
||||
**Take great care when manually creating and using management tokens**.
|
||||
|
||||
{{< product-name >}} requires at least one user associated with your cluster
|
||||
{{< product-name >}} requires at least one [Admin user](/influxdb/cloud-dedicated/reference/internals/security/#admin-user-group) associated with your cluster
|
||||
and authorized through your OAuth2 identity provider to manually create a
|
||||
management token.
|
||||
{{% /warn %}}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,92 @@
|
|||
---
|
||||
title: Manage users
|
||||
seotitle: Manage users and permissions in InfluxDB Cloud Dedicated
|
||||
description: >
|
||||
Manage users and access to resources in your InfluxDB Cloud Dedicated cluster.
|
||||
Assign user groups for role-based access control and security.
|
||||
menu:
|
||||
influxdb_cloud_dedicated:
|
||||
parent: Administer InfluxDB Cloud
|
||||
weight: 101
|
||||
influxdb/cloud-dedicated/tags: [user groups]
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/reference/internals/security/
|
||||
- /influxdb/cloud-dedicated/admin/tokens/
|
||||
---
|
||||
|
||||
Manage users and access to resources in your {{% product-name %}} cluster.
|
||||
|
||||
By assigning users to different groups based on the level of access they need,
|
||||
you can minimize unnecessary access and reduce the risk of inadvertent
|
||||
actions.
|
||||
User groups associate access privileges with user attributes--an important part of the
|
||||
Attribute-Based Access Control (ABAC) security model which grants access based on
|
||||
user attributes, resource types, and environment context.
|
||||
|
||||
- [Available user groups](#available-user-groups)
|
||||
- [Manage users](#manage-users)
|
||||
|
||||
## Available user groups
|
||||
|
||||
In {{% product-name %}}, users have "management" responsibilities, such as creating and
|
||||
deleting [databases](/influxdb/cloud-dedicated/admin/databases/), [viewing resource information](/influxdb/cloud-dedicated/admin/monitor-your-cluster/), and provisioning
|
||||
[database tokens](/influxdb/cloud-dedicated/admin/tokens/database/) for reading and writing data.
|
||||
|
||||
A user can belong to the following groups, each with predefined privileges:
|
||||
|
||||
- **Admin**: Read and write permissions on all resources.
|
||||
- **Member**: Read permission on certain resources and create permission for
|
||||
database tokens; members can't delete or create databases or management tokens.
|
||||
- **Auditor**: Read permission on all resources; auditors can't modify resources.
|
||||
|
||||
{{% note %}}
|
||||
#### Existing users are Admin by default
|
||||
|
||||
With the release of user groups for {{% product-name %}}, all existing users
|
||||
in your account are initially assigned to the Admin group, retaining full
|
||||
access to resources in your cluster.
|
||||
{{% /note %}}
|
||||
|
||||
## Manage users
|
||||
|
||||
- [Assign a user to a different group](#assign-a-user-to-a-different-group)
|
||||
- [Invite a user to your account](#invite-a-user-to-your-account)
|
||||
|
||||
### Assign a user to a different group
|
||||
|
||||
To assign existing users in your account to different
|
||||
groups, [contact InfluxData support](https://support.influxdata.com/s/login/)
|
||||
and provide the list of users and the desired [user groups](#available-user-groups)
|
||||
for each.
|
||||
|
||||
### Invite a user to your account
|
||||
|
||||
For new users that you want to add to your account, the InfluxData Support Team
|
||||
configures invitations with the attributes and groups that you specify.
|
||||
|
||||
1. [Contact InfluxData support](https://support.influxdata.com/s/login/)
|
||||
to invite a user to your account.
|
||||
In your request, provide the user details, including email address, desired
|
||||
[user groups](#available-user-groups), and other attributes for the user.
|
||||
2. InfluxData support creates the user account and emails the user an invitation
|
||||
that includes the following:
|
||||
|
||||
- A login URL to authenticate access to the cluster
|
||||
- The {{% product-name %}} **account ID**
|
||||
- The {{% product-name %}} **cluster ID**
|
||||
- The {{% product-name %}} **cluster URL**
|
||||
- A password reset email for setting the login password
|
||||
|
||||
3. The user accepts the invitation to your account
|
||||
|
||||
With a valid password, the user can access cluster resources by interacting with the
|
||||
[`influxctl`](/influxdb/cloud-dedicated/reference/cli/influxctl/) command line tool.
|
||||
The assigned user groups determine the user's access to resources.
|
||||
|
||||
{{% note %}}
|
||||
#### Use database tokens to authorize data reads and writes
|
||||
|
||||
In {{% product-name %}}, user groups control access for managing cluster resources.
|
||||
[Database tokens](/influxdb/cloud-dedicated/admin/tokens/database/) control access
|
||||
for reading and writing data in cluster databases.
|
||||
{{% /note %}}
|
||||
|
|
@ -162,7 +162,7 @@ WHERE
|
|||
{{% influxdb/custom-timestamps %}}
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z'::TIMESTAMP) as _time,
|
||||
DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z') as _time,
|
||||
room,
|
||||
selector_max(temp, time)['value'] AS 'max temp'
|
||||
FROM
|
||||
|
|
@ -593,8 +593,7 @@ _If your project's virtual environment is already running, skip to step 3._
|
|||
"time"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/apache/arrow/go/v13/arrow"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func Query() error {
|
||||
|
|
@ -638,10 +637,9 @@ _If your project's virtual environment is already running, skip to step 3._
|
|||
// Iterate over rows and prints column values in table format.
|
||||
for iterator.Next() {
|
||||
row := iterator.Value()
|
||||
// Use Go arrow and time packages to format unix timestamp
|
||||
// Use Go time package to format unix timestamp
|
||||
// as a time with timezone layout (RFC3339).
|
||||
time := (row["time"].(arrow.Timestamp)).
|
||||
ToTime(arrow.TimeUnit(arrow.Nanosecond)).
|
||||
time := (row["time"].(time.Time)).
|
||||
Format(time.RFC3339)
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n",
|
||||
time, row["room"], row["co"], row["hum"], row["temp"])
|
||||
|
|
@ -661,8 +659,7 @@ _If your project's virtual environment is already running, skip to step 3._
|
|||
- `io`
|
||||
- `os`
|
||||
- `text/tabwriter`
|
||||
- `github.com/apache/arrow/go/v13/arrow`
|
||||
- `github.com/InfluxCommunity/influxdb3-go/influxdb3`
|
||||
- `github.com/InfluxCommunity/influxdb3-go/v2/influxdb3`
|
||||
|
||||
2. Defines a `Query()` function that does the following:
|
||||
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ weight: 101
|
|||
metadata: [1 / 3]
|
||||
related:
|
||||
- /influxdb/cloud-dedicated/admin/databases/
|
||||
- /influxdb/cloud-dedicated/admin/tokens/
|
||||
- /influxdb/cloud-dedicated/reference/cli/influxctl/
|
||||
- /influxdb/cloud-dedicated/reference/api/
|
||||
---
|
||||
|
|
|
|||
|
|
@ -835,7 +835,7 @@ To write data to {{% product-name %}} using Go, use the InfluxDB v3
|
|||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
// Write line protocol data to InfluxDB
|
||||
|
|
|
|||
|
|
@ -457,7 +457,8 @@ For {{% product-name %}} v1 API `/query` requests, set parameters as listed in t
|
|||
|
||||
Parameter | Allowed in | Ignored | Value
|
||||
----------|------------|---------|-------------------------------------------------------------------------
|
||||
`chunked` | | Ignored | N/A _(Note that an unbounded query might return a large amount of data)_
|
||||
`chunked` | Query string | Honored | Returns points in streamed batches instead of in a single response. If set to `true`, InfluxDB chunks responses by series or by every 10,000 points, whichever occurs first.
|
||||
`chunked_size` | Query string | Honored | **Requires `chunked` to be set to `true`**. If set to a specific value, InfluxDB chunks responses by series or by this number of points.
|
||||
`db` | Query string | Honored | Database name |
|
||||
`epoch` | Query string | Honored | [Timestamp precision](#timestamp-precision) |
|
||||
`p` | Query string | Honored | Database token
|
||||
|
|
|
|||
|
|
@ -28,10 +28,9 @@ that queries data from an InfluxDB Cloud TSM-powered bucket in time-based batche
|
|||
and writes each batch to an {{< product-name >}} (InfluxDB v3) database in
|
||||
another organization.
|
||||
|
||||
{{% cloud %}}
|
||||
All query requests are subject to your InfluxDB Cloud organization's
|
||||
[rate limits and adjustable quotas](/influxdb/cloud/account-management/limits/).
|
||||
{{% /cloud %}}
|
||||
> [!Important]
|
||||
> All query requests are subject to your InfluxDB Cloud organization's
|
||||
> [rate limits and adjustable quotas](/influxdb/cloud/account-management/limits/).
|
||||
|
||||
- [Before you migrate](#before-you-migrate)
|
||||
- [Set up the migration](#set-up-the-migration)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ list_code_example: |
|
|||
```go
|
||||
import (
|
||||
"context"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func Query() error {
|
||||
|
|
@ -94,7 +94,7 @@ to install a recent version of the Go programming language for your system.
|
|||
In your terminal, enter the following command to download and install the client library:
|
||||
|
||||
```sh
|
||||
go get github.com/InfluxCommunity/influxdb3-go
|
||||
go get github.com/InfluxCommunity/influxdb3-go/v2
|
||||
```
|
||||
|
||||
- [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go) {{< req text="\* " color="magenta" >}}: Provides the `influxdb3` package and also installs the [Apache `arrow` module](https://arrow.apache.org/docs/python/index.html) for working with Arrow data returned from queries.
|
||||
|
|
@ -139,8 +139,7 @@ import (
|
|||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/apache/arrow/go/v13/arrow"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func Query() error {
|
||||
|
|
@ -187,10 +186,9 @@ func Query() error {
|
|||
fmt.Fprintln(w, "Process each row as key-value pairs:")
|
||||
for iterator2.Next() {
|
||||
row := iterator2.Value()
|
||||
// Use Go arrow and time packages to format unix timestamp
|
||||
// Use Go time package to format unix timestamp
|
||||
// as a time with timezone layout (RFC3339)
|
||||
time := (row["time"].(arrow.Timestamp)).
|
||||
ToTime(arrow.TimeUnit(arrow.Nanosecond)).
|
||||
time := (row["time"].(time.Time)).
|
||||
Format(time.RFC3339)
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n",
|
||||
time, row["room"], row["co"], row["hum"], row["temp"])
|
||||
|
|
@ -236,8 +234,7 @@ import (
|
|||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/apache/arrow/go/v13/arrow"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func InfluxQL() error {
|
||||
|
|
@ -289,10 +286,9 @@ func InfluxQL() error {
|
|||
fmt.Fprintln(w, "Process each row as key-value pairs:")
|
||||
for iterator2.Next() {
|
||||
row := iterator2.Value()
|
||||
// Use Go arrow and time packages to format unix timestamp
|
||||
// Use Go time package to format unix timestamp
|
||||
// as a time with timezone layout (RFC3339)
|
||||
time := (row["time"].(arrow.Timestamp)).
|
||||
ToTime(arrow.TimeUnit(arrow.Nanosecond)).
|
||||
time := (row["time"].(time.Time)).
|
||||
Format(time.RFC3339)
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n",
|
||||
time, row["room"], row["co"], row["hum"], row["temp"])
|
||||
|
|
|
|||
|
|
@ -257,8 +257,7 @@ import (
|
|||
"os"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
"github.com/apache/arrow/go/v14/arrow"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func Query(query string, parameters influxdb3.QueryParameters,
|
||||
|
|
@ -301,10 +300,10 @@ func Query(query string, parameters influxdb3.QueryParameters,
|
|||
// Process each row as key-value pairs.
|
||||
for iterator.Next() {
|
||||
row := iterator.Value()
|
||||
// Use Go arrow and time packages to format unix timestamp
|
||||
// Use Go time package to format unix timestamp
|
||||
// as a time with timezone layout (RFC3339 format)
|
||||
time := (row["time"].(arrow.Timestamp)).
|
||||
ToTime(arrow.Nanosecond).Format(time.RFC3339)
|
||||
time := (row["time"].(time.Time)).
|
||||
Format(time.RFC3339)
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n",
|
||||
time, row["room"], row["co"], row["hum"], row["temp"])
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ list_code_example: |
|
|||
##### Aggregate by time-based intervals
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z'::TIMESTAMP) AS time,
|
||||
DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z') AS time,
|
||||
mean(field1),
|
||||
sum(field2),
|
||||
tag1
|
||||
|
|
@ -206,7 +206,7 @@ groups:
|
|||
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z'::TIMESTAMP) AS time
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z') AS time
|
||||
FROM home
|
||||
...
|
||||
```
|
||||
|
|
@ -225,7 +225,7 @@ groups:
|
|||
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z'::TIMESTAMP) AS time
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z') AS time
|
||||
...
|
||||
GROUP BY 1, room
|
||||
...
|
||||
|
|
@ -235,7 +235,7 @@ groups:
|
|||
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z'::TIMESTAMP) AS _time
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z') AS _time
|
||||
FROM home
|
||||
...
|
||||
GROUP BY _time, room
|
||||
|
|
@ -247,7 +247,7 @@ The following example retrieves unique combinations of time intervals and rooms
|
|||
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z'::TIMESTAMP) AS time,
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z') AS time,
|
||||
room,
|
||||
selector_max(temp, time)['value'] AS 'max temp',
|
||||
selector_min(temp, time)['value'] AS 'min temp',
|
||||
|
|
@ -288,7 +288,7 @@ If you want to reference a calculated time column by name, use an alias differen
|
|||
|
||||
```sql
|
||||
SELECT
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z'::TIMESTAMP)
|
||||
DATE_BIN(INTERVAL '2 hours', time, '1970-01-01T00:00:00Z')
|
||||
AS _time,
|
||||
room,
|
||||
selector_max(temp, time)['value'] AS 'max temp',
|
||||
|
|
|
|||
|
|
@ -254,8 +254,7 @@ import (
|
|||
"os"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
"github.com/apache/arrow/go/v14/arrow"
|
||||
"github.com/InfluxCommunity/influxdb3-go/influxdb3"
|
||||
"github.com/InfluxCommunity/influxdb3-go/v2/influxdb3"
|
||||
)
|
||||
|
||||
func Query(query string, parameters influxdb3.QueryParameters) error {
|
||||
|
|
@ -297,10 +296,10 @@ func Query(query string, parameters influxdb3.QueryParameters) error {
|
|||
// Process each row as key-value pairs.
|
||||
for iterator.Next() {
|
||||
row := iterator.Value()
|
||||
// Use Go arrow and time packages to format unix timestamp
|
||||
// Use Go time package to format unix timestamp
|
||||
// as a time with timezone layout (RFC3339 format)
|
||||
time := (row["time"].(arrow.Timestamp)).
|
||||
ToTime(arrow.Nanosecond).Format(time.RFC3339)
|
||||
time := (row["time"].(time.Time)).
|
||||
Format(time.RFC3339)
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n",
|
||||
time, row["room"], row["co"], row["hum"], row["temp"])
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ By learning how to generate and interpret reports for the query plan,
|
|||
you can better understand how the query is executed and identify bottlenecks that affect the performance of your query.
|
||||
|
||||
For example, if the query plan reveals that your query reads a large number of Parquet files,
|
||||
you can then take steps to [optimize your query](/influxdb/cloud-dedicated/query-data/optimize-queries/), such as add filters to read less data or
|
||||
you can then take steps to [optimize your query](/influxdb/cloud-dedicated/query-data/troubleshoot-and-optimize/optimize-queries/), such as add filters to read less data or
|
||||
configure your cluster to store fewer and larger files.
|
||||
|
||||
- [Use EXPLAIN keywords to view a query plan](#use-explain-keywords-to-view-a-query-plan)
|
||||
|
|
@ -113,7 +113,7 @@ Plans are in _tree format_--each plan is an upside-down tree in which
|
|||
execution and data flow from _leaf nodes_, the innermost steps in the plan, to outer _branch nodes_.
|
||||
Whether reading a logical or physical plan, keep the following in mind:
|
||||
|
||||
- Start at the the _leaf nodes_ and read upward.
|
||||
- Start at the _leaf nodes_ and read upward.
|
||||
- At the top of the plan, the _root node_ represents the final, encompassing step.
|
||||
|
||||
In a [physical plan](/influxdb/cloud-dedicated/reference/internals/query-plan/#physical-plan), each step is an [`ExecutionPlan` node](/influxdb/cloud-dedicated/reference/internals/query-plan/#execution-plan-nodes) that receives expressions for input data and output requirements, and computes a partition of data.
|
||||
|
|
@ -770,4 +770,4 @@ Operator structure for aggregating, sorting, and final output.
|
|||
- `SortPreservingMergeExec: [city@0 ASC NULLS LAST]`: Merges and sorts the four sorted streams for the final output.
|
||||
|
||||
In the preceding examples, the `EXPLAIN` report shows the query plan without executing the query.
|
||||
To view runtime metrics, such as execution time for a plan and its operators, use [`EXPLAIN ANALYZE`](/influxdb/cloud-dedicated/reference/sql/explain/#explain-analyze) to generate the report and [tracing](/influxdb/cloud-dedicated/query-data/optimize-queries/#enable-trace-logging) for further debugging, if necessary.
|
||||
To view runtime metrics, such as execution time for a plan and its operators, use [`EXPLAIN ANALYZE`](/influxdb/cloud-dedicated/reference/sql/explain/#explain-analyze) to generate the report and [tracing](/influxdb/cloud-dedicated/query-data/troubleshoot-and-optimize/optimize-queries/#enable-trace-logging) for further debugging, if necessary.
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue