Merge branch 'master' into v3-java-reference-examples
commit
09d4956293
|
@ -0,0 +1,10 @@
|
|||
extends: spelling
|
||||
message: "Did you really mean '%s'?"
|
||||
level: warning
|
||||
ignore:
|
||||
# Ignore the following words. All words are case-insensitive.
|
||||
# To use case-sensitive matching, use the filters section or vocabulary Terms.
|
||||
- InfluxDBv2/Terms/query-functions.txt
|
||||
- InfluxDBv2/Terms/server-config-options.txt
|
||||
filters:
|
||||
- '(\[|`)ui-disabled(`|\])'
|
|
@ -0,0 +1,76 @@
|
|||
|
||||
|
||||
# InfluxQL
|
||||
# is_scalar_math_function
|
||||
# Source: https://github.com/influxdata/influxdb_iox/blob/4f9c901dcfece5fcc4d17cfecb6ec45a0dccda5a/influxdb_influxql_parser/src/functions.rs
|
||||
abs
|
||||
sin
|
||||
cos
|
||||
tan
|
||||
asin
|
||||
acos
|
||||
acosh
|
||||
asinh
|
||||
atan
|
||||
atanh
|
||||
atan2
|
||||
cbrt
|
||||
exp
|
||||
gcd
|
||||
isnan
|
||||
iszero
|
||||
lcm
|
||||
log
|
||||
ln
|
||||
log2
|
||||
log10
|
||||
nanvl
|
||||
sqrt
|
||||
pow
|
||||
floor
|
||||
ceil
|
||||
round
|
||||
|
||||
# InfluxQL operators
|
||||
bitfield
|
||||
|
||||
# is_aggregate_function
|
||||
# Source: https://github.com/influxdata/influxdb_iox/blob/4f9c901dcfece5fcc4d17cfecb6ec45a0dccda5a/influxdb_influxql_parser/src/functions.rs
|
||||
approx_distinct
|
||||
approx_median
|
||||
approx_percentile_cont
|
||||
approx_percentile_cont_with_weight
|
||||
covar
|
||||
cumulative_sum
|
||||
derivative
|
||||
difference
|
||||
elapsed
|
||||
moving_average
|
||||
non_negative_derivative
|
||||
non_negative_difference
|
||||
bottom
|
||||
first
|
||||
last
|
||||
max
|
||||
min
|
||||
percentile
|
||||
sample
|
||||
top
|
||||
count
|
||||
integral
|
||||
mean
|
||||
median
|
||||
mode
|
||||
spread
|
||||
stddev
|
||||
sum
|
||||
holt_winters
|
||||
holt_winters_with_fit
|
||||
chande_momentum_oscillator
|
||||
exponential_moving_average
|
||||
double_exponential_moving_average
|
||||
kaufmans_efficiency_ratio
|
||||
kaufmans_adaptive_moving_average
|
||||
triple_exponential_moving_average
|
||||
triple_exponential_derivative
|
||||
relative_strength_index
|
|
@ -0,0 +1,70 @@
|
|||
assets-path
|
||||
bolt-path
|
||||
e2e-testing
|
||||
engine-path
|
||||
feature-flags
|
||||
flux-log-enabled
|
||||
hardening-enabled
|
||||
http-bind-address
|
||||
http-idle-timeout
|
||||
http-read-header-timeout
|
||||
http-read-timeout
|
||||
http-write-timeout
|
||||
influxql-max-select-buckets
|
||||
influxql-max-select-point
|
||||
influxql-max-select-series
|
||||
instance-id
|
||||
log-level
|
||||
metrics-disabled
|
||||
nats-max-payload-bytes
|
||||
nats-port
|
||||
no-tasks
|
||||
pprof-disabled
|
||||
query-concurrency
|
||||
query-initial-memory-bytes
|
||||
query-max-memory-bytes
|
||||
query-memory-bytes
|
||||
query-queue-size
|
||||
reporting-disabled
|
||||
secret-store
|
||||
session-length
|
||||
session-renew-disabled
|
||||
sqlite-path
|
||||
storage-cache-max-memory-size
|
||||
storage-cache-snapshot-memory-size
|
||||
storage-cache-snapshot-write-cold-duration
|
||||
storage-compact-full-write-cold-duration
|
||||
storage-compact-throughput-burst
|
||||
storage-max-concurrent-compactions
|
||||
storage-max-index-log-file-size
|
||||
storage-no-validate-field-size
|
||||
storage-retention-check-interval
|
||||
storage-series-file-max-concurrent-snapshot-compactions
|
||||
storage-series-id-set-cache-size
|
||||
storage-shard-precreator-advance-period
|
||||
storage-shard-precreator-check-interval
|
||||
storage-tsm-use-madv-willneed
|
||||
storage-validate-keys
|
||||
storage-wal-fsync-delay
|
||||
storage-wal-max-concurrent-writes
|
||||
storage-wal-max-write-delay
|
||||
storage-write-timeout
|
||||
store
|
||||
strong-passwords
|
||||
testing-always-allow-setup
|
||||
tls-cert
|
||||
tls-key
|
||||
tls-min-version
|
||||
tls-strict-ciphers
|
||||
tracing-type
|
||||
ui-disabled
|
||||
vault-addr
|
||||
vault-cacert
|
||||
vault-capath
|
||||
vault-client-cert
|
||||
vault-client-key
|
||||
vault-client-timeout
|
||||
vault-max-retries
|
||||
vault-skip-verify
|
||||
vault-tls-server-name
|
||||
vault-token
|
|
@ -38,7 +38,7 @@ SQLAlchemy
|
|||
Splunk
|
||||
[Ss]uperset
|
||||
TBs?
|
||||
UI
|
||||
\bUI\b
|
||||
URL
|
||||
US (East|West|Central|North|South|Northeast|Northwest|Southeast|Southwest)
|
||||
Unix
|
||||
|
|
|
@ -15,6 +15,7 @@ RUN apt-get update && apt-get upgrade -y && apt-get install -y \
|
|||
curl \
|
||||
git \
|
||||
gpg \
|
||||
influxdb2 \
|
||||
influxdb2-cli \
|
||||
influxctl \
|
||||
jq \
|
||||
|
@ -41,6 +42,9 @@ ENV PYTHONUNBUFFERED=1
|
|||
|
||||
WORKDIR /app
|
||||
|
||||
RUN mkdir -p /app/log && chmod +w /app/log
|
||||
RUN mkdir -p /app/assets && chmod +w /app/assets
|
||||
|
||||
# Some Python test dependencies (pytest-dotenv and pytest-codeblocks) aren't
|
||||
# available as packages in apt-cache, so use pip to download dependencies in a
# separate step and use Docker's caching.
|
||||
# Pytest configuration file.
|
||||
|
@ -62,6 +66,8 @@ RUN echo '#!/bin/bash' > /usr/local/bin/xdg-open \
|
|||
&& echo 'echo "$1" > /shared/urls.txt' >> /usr/local/bin/xdg-open \
|
||||
&& chmod +x /usr/local/bin/xdg-open
|
||||
|
||||
RUN service influxdb start
|
||||
|
||||
# Copy test scripts and make them executable.
|
||||
COPY --chmod=755 ./test/scripts/parse_yaml.sh /usr/local/bin/parse_yaml
|
||||
|
||||
|
|
|
@ -3,7 +3,9 @@ title: Work with Prometheus
|
|||
description: >
|
||||
Flux provides tools for scraping and processing raw [Prometheus-formatted metrics](https://prometheus.io/docs/concepts/data_model/)
|
||||
from an HTTP-accessible endpoint.
|
||||
menu: flux_0_x
|
||||
menu:
|
||||
flux_v0:
|
||||
name: Work with Prometheus
|
||||
weight: 8
|
||||
flux/v0/tags: [prometheus]
|
||||
---
|
||||
|
|
|
@ -2,7 +2,9 @@
|
|||
title: Query data sources
|
||||
description: >
|
||||
Query different data sources with Flux including InfluxDB, SQL databases, CSV, and Prometheus.
|
||||
menu: flux_0_x
|
||||
menu:
|
||||
flux_v0:
|
||||
name: Query data sources
|
||||
weight: 5
|
||||
---
|
||||
|
||||
|
|
|
@ -2,7 +2,9 @@
|
|||
title: Write to data sources
|
||||
description: >
|
||||
Write to different data sources with Flux including InfluxDB, SQL databases, CSV, and Prometheus.
|
||||
menu: flux_0_x
|
||||
menu:
|
||||
flux_v0:
|
||||
name: Write to data sources
|
||||
weight: 5
|
||||
---
|
||||
|
||||
|
|
|
@ -282,16 +282,16 @@ status = None
|
|||
# Define callbacks for write responses
|
||||
def success(self, data: str):
|
||||
status = f"Success writing batch: data: {data}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
def error(self, data: str, err: InfluxDBError):
|
||||
status = f"Error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
|
||||
def retry(self, data: str, err: InfluxDBError):
|
||||
status = f"Retry error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
# Instantiate WriteOptions for batching
|
||||
write_options = WriteOptions()
|
||||
|
|
|
@ -254,16 +254,16 @@ status = None
|
|||
# Define callbacks for write responses
|
||||
def success(self, data: str):
|
||||
status = f"Success writing batch: data: {data}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
def error(self, data: str, err: InfluxDBError):
|
||||
status = f"Error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
|
||||
def retry(self, data: str, err: InfluxDBError):
|
||||
status = f"Retry error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
# Instantiate WriteOptions for batching
|
||||
write_options = WriteOptions()
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
---
|
||||
title: Configure your InfluxDB cluster
|
||||
description: >
|
||||
InfluxDB Clustered deployments are managed using Kubernetes and configured using
|
||||
a YAML configuration file.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Configure your cluster
|
||||
parent: Install InfluxDB Clustered
|
||||
weight: 130
|
||||
related:
|
||||
- /influxdb/clustered/admin/upgrade/
|
||||
---
|
||||
|
||||
InfluxDB Clustered deployments are managed using Kubernetes and configured using
|
||||
a YAML configuration file.
|
||||
Apply configuration settings to your cluster by editing and applying a
|
||||
Kubernetes custom resource (CRD) called `AppInstance`.
|
||||
The `AppInstance` CRD is defined in a YAML file (use `example-customer.yml` as a
|
||||
template) or, if using the InfluxDB Clustered Helm chart, is provided by the
|
||||
chart and configured in a `values.yaml` file.
|
||||
|
||||
Use one of the following methods to configure your InfluxDB cluster:
|
||||
|
||||
{{< children type="anchored-list" >}}
|
||||
|
||||
{{< children >}}
|
|
@ -1,19 +1,19 @@
|
|||
---
|
||||
title: Configure your InfluxDB cluster
|
||||
title: Use the InfluxDB AppInstance resource configuration
|
||||
list_title: Configure your InfluxDB AppInstance resource directly
|
||||
description: >
|
||||
InfluxDB Clustered deployments are managed using Kubernetes and configured using
|
||||
a YAML configuration file.
|
||||
Configure your InfluxDB cluster by editing configuration options in
|
||||
the provided `AppInstance` resource.
|
||||
menu:
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Configure your cluster
|
||||
parent: Install InfluxDB Clustered
|
||||
weight: 130
|
||||
related:
|
||||
- /influxdb/clustered/admin/upgrade/
|
||||
name: Configure AppInstance
|
||||
parent: Configure your cluster
|
||||
weight: 220
|
||||
---
|
||||
|
||||
InfluxDB Clustered deployments are managed using Kubernetes and configured using
|
||||
a YAML configuration file. InfluxData provides the following items:
|
||||
Manage your InfluxDB Clustered deployments using Kubernetes and apply configuration settings using
|
||||
a YAML configuration file.
|
||||
|
||||
- **`influxdb-docker-config.json`**: an authenticated Docker configuration file.
|
||||
The InfluxDB Clustered software is in a secure container registry.
|
||||
|
@ -28,7 +28,8 @@ a YAML configuration file. InfluxData provides the following items:
|
|||
|
||||
{{% note %}}
|
||||
|
||||
This documentation refers to a `myinfluxdb.yml` file that you copy from `example-customer.yml` and edit for your InfluxDB cluster.
|
||||
This documentation refers to a `myinfluxdb.yml` file that you copy from
|
||||
`example-customer.yml` and edit for your InfluxDB cluster.
|
||||
|
||||
{{% /note %}}
|
||||
|
||||
|
@ -62,7 +63,9 @@ The InfluxDB installation, update, and upgrade processes are driven by editing
|
|||
and applying a [Kubernetes custom resource (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
|
||||
called `AppInstance`.
|
||||
The `AppInstance` CRD is defined in a YAML file (use `example-customer.yml` as a
|
||||
template) that contains key information, such as:
|
||||
template).
|
||||
|
||||
The `AppInstance` resource contains key information, such as:
|
||||
|
||||
- Name of the target namespace
|
||||
- Version of the InfluxDB package
|
||||
|
@ -101,7 +104,7 @@ InfluxData provides an `app-instance-schema.json` JSON schema file that VS Code
|
|||
|
||||
### Create a namespace for InfluxDB
|
||||
|
||||
Create a namespace for InfluxDB. For example, using `kubectl`::
|
||||
Create a namespace for InfluxDB--for example, enter the following `kubectl` command in your terminal:
|
||||
|
||||
```sh
|
||||
kubectl create namespace influxdb
|
||||
|
@ -130,9 +133,9 @@ container images required to run InfluxDB Clustered.
|
|||
Your Kubernetes Cluster needs access to the container registry to pull down and
|
||||
install InfluxDB.
|
||||
|
||||
There are two main scenarios:
|
||||
When pulling InfluxDB Clustered images, there are two main scenarios:
|
||||
|
||||
- You have a kubernetes cluster that can pull from the InfluxData container registry.
|
||||
- You have a Kubernetes cluster that can pull from the InfluxData container registry.
|
||||
- You run in an environment with no network interfaces ("air-gapped") and you
|
||||
can only access a private container registry.
|
||||
|
|
@ -0,0 +1,964 @@
|
|||
---
|
||||
title: Configure your InfluxDB cluster using Helm
|
||||
description: >
|
||||
Use Helm to configure and deploy your InfluxDB Clustered `AppInstance` resource.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Use Helm
|
||||
parent: Configure your cluster
|
||||
weight: 230
|
||||
---
|
||||
|
||||
Manage your InfluxDB Clustered deployments using Kubernetes and apply configuration settings using a YAML configuration file.
|
||||
The [InfluxDB Clustered Helm chart](https://github.com/influxdata/helm-charts/tree/master/charts/influxdb3-clustered)
|
||||
provides an alternative method for deploying your InfluxDB cluster using
|
||||
[Helm](https://helm.sh/). It acts as a wrapper for the InfluxDB `AppInstance`
|
||||
resource. When using Helm, apply configuration options in a
|
||||
`values.yaml` file on your local machine.
|
||||
|
||||
InfluxData provides the following items:
|
||||
|
||||
- **`influxdb-docker-config.json`**: an authenticated Docker configuration file.
|
||||
The InfluxDB Clustered software is in a secure container registry.
|
||||
This file grants access to the collection of container images required to
|
||||
install InfluxDB Clustered.
|
||||
|
||||
---
|
||||
|
||||
## Configuration data
|
||||
|
||||
When ready to install InfluxDB, have the following information available:
|
||||
|
||||
- **InfluxDB cluster hostname**: the hostname Kubernetes uses to expose InfluxDB API endpoints
|
||||
- **PostgreSQL-style data source name (DSN)**: used to access your
|
||||
PostgreSQL-compatible database that stores the InfluxDB Catalog.
|
||||
- **Object store credentials** _(AWS S3 or S3-compatible)_
|
||||
- Endpoint URL
|
||||
- Access key
|
||||
- Bucket name
|
||||
- Region (required for S3, may not be required for other object stores)
|
||||
- **Local storage information** _(for ingester pods)_
|
||||
- Storage class
|
||||
- Storage size
|
||||
- **OAuth2 provider credentials**
|
||||
- Client ID
|
||||
- JWKS endpoint
|
||||
- Device authorization endpoint
|
||||
- Token endpoint
|
||||
|
||||
InfluxDB is deployed to a Kubernetes namespace which, throughout the following
|
||||
installation procedure, is referred to as the _target_ namespace.
|
||||
For simplicity, we assume this namespace is `influxdb`, however
|
||||
you may use any name you like.
|
||||
|
||||
### AppInstance resource
|
||||
|
||||
The InfluxDB installation, update, and upgrade processes are driven by editing
|
||||
and applying a [Kubernetes custom resource (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
|
||||
called `AppInstance`.
|
||||
The `AppInstance` CRD is included in the InfluxDB Clustered Helm chart and can
|
||||
be configured by applying custom settings in the `values.yaml` included in the
|
||||
chart.
|
||||
|
||||
The `AppInstance` resource contains key information, such as:
|
||||
|
||||
- Name of the target namespace
|
||||
- Version of the InfluxDB package
|
||||
- Reference to the InfluxDB container registry pull secrets
|
||||
- Hostname where the InfluxDB API is exposed
|
||||
- Parameters to connect to [external prerequisites](/influxdb/clustered/install/prerequisites/)
|
||||
|
||||
### Kubit operator
|
||||
|
||||
The InfluxDB Clustered Helm chart also includes the
|
||||
[`kubecfg kubit` operator](https://github.com/kubecfg/kubit) (maintained by InfluxData)
|
||||
which simplifies the installation and management of the InfluxDB Clustered package.
|
||||
It manages the application of the jsonnet templates used to install, manage, and
|
||||
update an InfluxDB cluster.
|
||||
|
||||
## Configure your cluster
|
||||
|
||||
1. [Install Helm](#install-helm)
|
||||
2. [Create a values.yaml file](#create-a-valuesyaml-file)
|
||||
3. [Create a namespace for InfluxDB](#create-a-namespace-for-influxdb)
|
||||
4. [Configure access to the InfluxDB container registry](#configure-access-to-the-influxdb-container-registry)
|
||||
5. [Set up cluster ingress](#set-up-cluster-ingress)
|
||||
6. [Modify the configuration file to point to prerequisites](#modify-the-configuration-file-to-point-to-prerequisites)
|
||||
7. [Provide a custom certificate authority bundle](#provide-a-custom-certificate-authority-bundle)
|
||||
<em class="op65">(Optional)</em>
|
||||
|
||||
### Install Helm
|
||||
|
||||
If you haven't already, [install Helm](https://helm.sh/docs/intro/install/) on
|
||||
your local machine.
|
||||
|
||||
### Create a values.yaml file
|
||||
|
||||
Download or copy the base `values.yaml` for the InfluxDB Clustered Helm chart
|
||||
from GitHub and store it locally. For example--if using cURL:
|
||||
|
||||
```bash
|
||||
curl -O https://raw.githubusercontent.com/influxdata/helm-charts/master/charts/influxdb3-clustered/values.yaml
|
||||
```
|
||||
|
||||
Or you can copy the default `values.yaml` from GitHub:
|
||||
|
||||
<a href="https://github.com/influxdata/helm-charts/blob/master/charts/influxdb3-clustered/values.yaml" class="btn github">View values.yaml on GitHub</a>
|
||||
|
||||
### Create a namespace for InfluxDB
|
||||
|
||||
Create a namespace for InfluxDB. For example, using `kubectl`:
|
||||
|
||||
```sh
|
||||
kubectl create namespace influxdb
|
||||
```
|
||||
|
||||
If you use a namespace name other than `influxdb`, update the `namespaceOverride`
|
||||
field in your `values.yaml` to use your custom namespace name.
|
||||
|
||||
### Configure access to the InfluxDB container registry
|
||||
|
||||
The provided `influxdb-docker-config.json` grants access to a collection of
|
||||
container images required to run InfluxDB Clustered.
|
||||
Your Kubernetes Cluster needs access to the container registry to pull down and
|
||||
install InfluxDB.
|
||||
|
||||
When pulling images, there are two main scenarios:
|
||||
|
||||
- You have a Kubernetes cluster that can pull from the InfluxData container registry.
|
||||
- You run in an environment with no network interfaces ("air-gapped") and you
|
||||
can only access a private container registry.
|
||||
|
||||
In both scenarios, you need a valid container registry secret file.
|
||||
Use [crane](https://github.com/google/go-containerregistry/tree/main/cmd/crane)
|
||||
to create a container registry secret file.
|
||||
|
||||
1. [Install crane](https://github.com/google/go-containerregistry/tree/main/cmd/crane#installation)
|
||||
2. Use the following command to create a container registry secret file and
|
||||
retrieve the necessary secrets:
|
||||
|
||||
{{% code-placeholders "PACKAGE_VERSION" %}}
|
||||
|
||||
```sh
|
||||
mkdir /tmp/influxdbsecret
|
||||
cp influxdb-docker-config.json /tmp/influxdbsecret/config.json
|
||||
DOCKER_CONFIG=/tmp/influxdbsecret \
|
||||
crane manifest \
|
||||
us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:PACKAGE_VERSION
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace {{% code-placeholder-key %}}`PACKAGE_VERSION`{{% /code-placeholder-key %}}
|
||||
with your InfluxDB Clustered package version.
|
||||
|
||||
---
|
||||
|
||||
If your Docker configuration is valid and you’re able to connect to the container
|
||||
registry, the command succeeds and the output is the JSON manifest for the Docker
|
||||
image, similar to the following:
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "View JSON manifest" %}}
|
||||
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.kubecfg.bundle.config.v1+json",
|
||||
"digest": "sha256:6900d2f248e678176c68f3768e7e48958bb96a59232070ff31b3b018cf299aa7",
|
||||
"size": 8598
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.kubecfg.bundle.tar+gzip",
|
||||
"digest": "sha256:7c1d62e76287035a9b22b2c155f328fae9beff2c6aa7a09a2dd2697539f41d98",
|
||||
"size": 404059
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"org.opencontainers.image.created": "1970-01-01T00:00:00Z",
|
||||
"org.opencontainers.image.revision": "unknown",
|
||||
"org.opencontainers.image.source": "kubecfg pack"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
If there’s a problem with the Docker configuration, crane won't retrieve the
|
||||
manifest and the output is similar to the following error:
|
||||
|
||||
```sh
|
||||
Error: fetching manifest us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:<package-version>: GET https://us-docker.pkg.dev/v2/token?scope=repository%3Ainfluxdb2-artifacts%2Fclustered%2Finfluxdb%3Apull&service=: DENIED: Permission "artifactregistry.repositories.downloadArtifacts" denied on resource "projects/influxdb2-artifacts/locations/us/repositories/clustered" (or it may not exist)
|
||||
```
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[Public registry (non-air-gapped)](#)
|
||||
[Private registry (air-gapped)](#)
|
||||
{{% /tabs %}}
|
||||
|
||||
{{% tab-content %}}
|
||||
|
||||
<!--------------------------- BEGIN Public Registry --------------------------->
|
||||
|
||||
#### Public registry (non-air-gapped)
|
||||
|
||||
To pull from the InfluxData registry, you need to create a Kubernetes secret in the target namespace.
|
||||
|
||||
```sh
|
||||
kubectl create secret docker-registry gar-docker-secret \
|
||||
--from-file=.dockerconfigjson=influxdb-docker-config.json \
|
||||
--namespace influxdb
|
||||
```
|
||||
|
||||
If successful, the output is the following:
|
||||
|
||||
```text
|
||||
secret/gar-docker-secret created
|
||||
```
|
||||
|
||||
By default, this secret is named `gar-docker-secret`.
|
||||
If you change the name of this secret, you must also change the value of the
|
||||
`imagePullSecrets.name` field in your `values.yaml`.
|
||||
|
||||
<!---------------------------- END Public Registry ---------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!--------------------------- BEGIN Private Registry -------------------------->
|
||||
|
||||
#### Private registry (air-gapped)
|
||||
|
||||
If your Kubernetes cluster can't use a public network to download container images
|
||||
from our container registry, do the following:
|
||||
|
||||
1. Copy the images from the InfluxDB registry to your own private registry.
|
||||
2. Configure your `AppInstance` resource with a reference to your private
|
||||
registry name.
|
||||
3. Provide credentials to your private registry.
|
||||
|
||||
The list of images that you need to copy is included in the package metadata.
|
||||
You can obtain it with any standard OCI image inspection tool. For example:
|
||||
|
||||
{{% code-placeholders "PACKAGE_VERSION" %}}
|
||||
|
||||
```sh
|
||||
DOCKER_CONFIG=/tmp/influxdbsecret \
|
||||
crane config \
|
||||
us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:PACKAGE_VERSION \
|
||||
| jq -r '.metadata["oci.image.list"].images[]' \
|
||||
> /tmp/images.txt
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
The output is a list of image names, similar to the following:
|
||||
|
||||
```
|
||||
us-docker.pkg.dev/influxdb2-artifacts/idpe/idpe-cd-ioxauth@sha256:5f015a7f28a816df706b66d59cb9d6f087d24614f485610619f0e3a808a73864
|
||||
us-docker.pkg.dev/influxdb2-artifacts/iox/iox@sha256:b59d80add235f29b806badf7410239a3176bc77cf2dc335a1b07ab68615b870c
|
||||
...
|
||||
```
|
||||
|
||||
Use `crane` to copy the images to your private registry:
|
||||
|
||||
{{% code-placeholders "REGISTRY_HOSTNAME" %}}
|
||||
|
||||
```sh
|
||||
</tmp/images.txt xargs -I% crane cp % REGISTRY_HOSTNAME/%
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace {{% code-placeholder-key %}}`REGISTRY_HOSTNAME`{{% /code-placeholder-key %}}
|
||||
with the hostname of your private registry--for example:
|
||||
|
||||
```text
|
||||
myregistry.mydomain.io
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Set the
|
||||
`images.registryOverride` field in your `values.yaml` to the location of your
|
||||
private registry--for example:
|
||||
|
||||
{{% code-placeholders "REGISTRY_HOSTNAME" %}}
|
||||
|
||||
```yml
|
||||
images:
|
||||
registryOverride: REGISTRY_HOSTNAME
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
<!---------------------------- END Private Registry --------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
### Set up cluster ingress
|
||||
|
||||
{{% note %}}
|
||||
InfluxDB Clustered components use gRPC/HTTP2 protocols. If using an external load balancer,
|
||||
you may need to explicitly enable these protocols on your load balancers.
|
||||
{{% /note %}}
|
||||
|
||||
The InfluxDB Clustered Helm chart includes the
|
||||
[Kubernetes Nginx Ingress Controller](https://github.com/kubernetes/ingress-nginx).
|
||||
Add a valid TLS Certificate to the cluster as a secret.
|
||||
Provide the paths to the TLS certificate file and key file:
|
||||
|
||||
{{% code-placeholders "TLS_(CERT|KEY)_PATH" %}}
|
||||
|
||||
```sh
|
||||
kubectl create secret tls ingress-tls \
|
||||
--namespace influxdb \
|
||||
--cert TLS_CERT_PATH \
|
||||
--key TLS_KEY_PATH
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- _{{% code-placeholder-key %}}`TLS_CERT_PATH`{{% /code-placeholder-key %}}:
|
||||
Path to the certificate file on your local machine._
|
||||
- _{{% code-placeholder-key %}}`TLS_KEY_PATH`{{% /code-placeholder-key %}}:
|
||||
Path to the certificate secret key file on your local machine._
|
||||
|
||||
---
|
||||
|
||||
Provide the TLS certificate secret to the InfluxDB configuration in the
|
||||
[Configure ingress step](#configure-ingress).
|
||||
|
||||
### Modify the configuration file to point to prerequisites
|
||||
|
||||
Update your `values.yaml` file with credentials necessary to connect your
|
||||
cluster to your prerequisites.
|
||||
|
||||
- [Configure ingress](#configure-ingress)
|
||||
- [Configure the object store](#configure-the-object-store)
|
||||
- [Configure the catalog database](#configure-the-catalog-database)
|
||||
- [Configure local storage for ingesters](#configure-local-storage-for-ingesters)
|
||||
- [Configure your OAuth2 provider](#configure-your-oauth2-provider)
|
||||
- [Configure the size of your cluster](#configure-the-size-of-your-cluster)
|
||||
|
||||
#### Configure ingress
|
||||
|
||||
To configure ingress, provide values for the following fields in your
|
||||
`values.yaml`:
|
||||
|
||||
- **`ingress.hosts`: Cluster hostnames**
|
||||
|
||||
Provide the hostnames that Kubernetes should use to expose the InfluxDB API
|
||||
endpoints--for example: `{{< influxdb/host >}}`.
|
||||
|
||||
_You can provide multiple hostnames. The ingress layer accepts incoming
|
||||
requests for all listed hostnames. This can be useful if you want to have
|
||||
distinct paths for your internal and external traffic._
|
||||
|
||||
{{% note %}}
|
||||
You are responsible for configuring and managing DNS. Options include:
|
||||
|
||||
- Manually managing DNS records
|
||||
- Using [external-dns](https://github.com/kubernetes-sigs/external-dns) to
|
||||
synchronize exposed Kubernetes services and ingresses with DNS providers.
|
||||
{{% /note %}}
|
||||
|
||||
- **`ingress.tlsSecretName`: TLS certificate secret name**
|
||||
|
||||
Provide the name of the secret that
|
||||
[contains your TLS certificate and key](#set-up-cluster-ingress).
|
||||
The examples in this guide use the name `ingress-tls`.
|
||||
|
||||
_The `tlsSecretName` field is optional. You may want to use it if you already
|
||||
have a TLS certificate for your DNS name._
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "Use cert-manager and Let's Encrypt to manage TLS certificates" %}}
|
||||
|
||||
If you instead want to automatically create an [ACME](https://datatracker.ietf.org/doc/html/rfc8555)
|
||||
certificate (for example, using [Let's Encrypt](https://letsencrypt.org/)), refer
|
||||
to the [cert-manager documentation](https://cert-manager.io/docs/usage/ingress/).
|
||||
In `ingress.tlsSecretName`, provide a name for the secret it should create.
|
||||
|
||||
{{% note %}}
|
||||
If you choose to use cert-manager, it's your responsibility to install and configure it.
|
||||
{{% /note %}}
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
{{% code-callout "ingress-tls|cluster-host\.com" "green" %}}
|
||||
|
||||
```yaml
|
||||
ingress:
|
||||
hosts:
|
||||
- {{< influxdb/host >}}
|
||||
tlsSecretName: ingress-tls
|
||||
```
|
||||
|
||||
{{% /code-callout %}}
|
||||
|
||||
#### Configure the object store
|
||||
|
||||
Connect your InfluxDB cluster to your object store. The information required
|
||||
to connect to your object store depends on your object storage provider.
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[Amazon S3 or S3-compatible](#)
|
||||
[Azure Blob Storage](#)
|
||||
[Google Cloud Storage](#)
|
||||
{{% /tabs %}}
|
||||
|
||||
{{% tab-content %}}
|
||||
|
||||
<!---------------------------------- BEGIN S3 --------------------------------->
|
||||
|
||||
If using Amazon S3 or an S3-compatible object store, provide values for the
|
||||
following fields in your `values.yaml`:
|
||||
|
||||
- `objectStore`
|
||||
- `bucket`: Object storage bucket name
|
||||
- `s3`:
|
||||
- `endpoint`: Object storage endpoint URL
|
||||
- `allowHttp`: _Set to `true` to allow unencrypted HTTP connections_
|
||||
- `accessKey.value`: Object storage access key
|
||||
_(can use a `value` literal or `valueFrom` to retrieve the value from a secret)_
|
||||
- `secretKey.value`: Object storage secret key
|
||||
_(can use a `value` literal or `valueFrom` to retrieve the value from a secret)_
|
||||
- `region`: Object storage region
|
||||
|
||||
{{% code-placeholders "S3_(URL|ACCESS_KEY|SECRET_KEY|BUCKET_NAME|REGION)" %}}
|
||||
|
||||
```yml
|
||||
objectStore:
|
||||
# Bucket that the Parquet files will be stored in
|
||||
bucket: S3_BUCKET_NAME
|
||||
|
||||
s3:
|
||||
# URL for S3 Compatible object store
|
||||
endpoint: S3_URL
|
||||
|
||||
# Set to true to allow communication over HTTP (instead of HTTPS)
|
||||
allowHttp: 'false'
|
||||
|
||||
# S3 Access Key
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
accessKey:
|
||||
value: S3_ACCESS_KEY
|
||||
|
||||
# S3 Secret Key
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
secretKey:
|
||||
value: S3_SECRET_KEY
|
||||
|
||||
# This value is required for AWS S3, it may or may not be required for other providers.
|
||||
region: S3_REGION
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`S3_BUCKET_NAME`{{% /code-placeholder-key %}}: Object storage bucket name
|
||||
- {{% code-placeholder-key %}}`S3_URL`{{% /code-placeholder-key %}}: Object storage endpoint URL
|
||||
- {{% code-placeholder-key %}}`S3_ACCESS_KEY`{{% /code-placeholder-key %}}: Object storage access key
|
||||
- {{% code-placeholder-key %}}`S3_SECRET_KEY`{{% /code-placeholder-key %}}: Object storage secret key
|
||||
- {{% code-placeholder-key %}}`S3_REGION`{{% /code-placeholder-key %}}: Object storage region
|
||||
|
||||
---
|
||||
|
||||
<!----------------------------------- END S3 ---------------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!-------------------------------- BEGIN AZURE -------------------------------->
|
||||
|
||||
If using Azure Blob Storage as your object store, provide values for the
|
||||
following fields in your `values.yaml`:
|
||||
|
||||
- `objectStore`
|
||||
- `bucket`: Azure Blob Storage bucket name
|
||||
- `azure`:
|
||||
- `accessKey.value`: Azure Blob Storage access key
|
||||
_(can use a `value` literal or `valueFrom` to retrieve the value from a secret)_
|
||||
- `account.value`: Azure Blob Storage account ID
|
||||
_(can use a `value` literal or `valueFrom` to retrieve the value from a secret)_
|
||||
|
||||
{{% code-placeholders "AZURE_(BUCKET_NAME|ACCESS_KEY|STORAGE_ACCOUNT)" %}}
|
||||
|
||||
```yml
|
||||
objectStore:
|
||||
# Bucket that the Parquet files will be stored in
|
||||
bucket: AZURE_BUCKET_NAME
|
||||
|
||||
azure:
|
||||
# Azure Blob Storage Access Key
|
||||
# This can also be provided as a valueFrom:
|
||||
accessKey:
|
||||
value: AZURE_ACCESS_KEY
|
||||
|
||||
# Azure Blob Storage Account
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
account:
|
||||
value: AZURE_STORAGE_ACCOUNT
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`AZURE_BUCKET_NAME`{{% /code-placeholder-key %}}: Object storage bucket name
|
||||
- {{% code-placeholder-key %}}`AZURE_ACCESS_KEY`{{% /code-placeholder-key %}}: Azure Blob Storage access key
|
||||
- {{% code-placeholder-key %}}`AZURE_STORAGE_ACCOUNT`{{% /code-placeholder-key %}}: Azure Blob Storage account ID
|
||||
|
||||
---
|
||||
|
||||
<!--------------------------------- END AZURE --------------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!---------------------------- BEGIN GOOGLE CLOUD STORAGE --------------------->
|
||||
|
||||
If using Google Cloud Storage as your object store, provide values for the
|
||||
following fields in your `values.yaml`:
|
||||
|
||||
- `objectStore`
|
||||
- `bucket`: Google Cloud Storage bucket name
|
||||
- `google`:
|
||||
- `serviceAccountSecret.name`: the Kubernetes Secret name that contains your
|
||||
Google IAM service account credentials
|
||||
- `serviceAccountSecret.key`: the key inside of your Google IAM secret that
|
||||
contains your Google IAM account credentials
|
||||
|
||||
{{% code-placeholders "GOOGLE_(BUCKET_NAME|IAM_SECRET|CREDENTIALS_KEY)" %}}
|
||||
|
||||
```yml
|
||||
objectStore:
|
||||
# Bucket that the Parquet files will be stored in
|
||||
bucket: GOOGLE_BUCKET_NAME
|
||||
|
||||
google:
|
||||
# This section is not needed if you are using GKE Workload Identity.
|
||||
# It is only required to use explicit service account secrets (JSON files)
|
||||
serviceAccountSecret:
|
||||
# Kubernetes Secret name containing the credentials for a Google IAM
|
||||
# Service Account.
|
||||
name: GOOGLE_IAM_SECRET
|
||||
# The key within the Secret containing the credentials.
|
||||
key: GOOGLE_CREDENTIALS_KEY
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`GOOGLE_BUCKET_NAME`{{% /code-placeholder-key %}}:
|
||||
Google Cloud Storage bucket name
|
||||
- {{% code-placeholder-key %}}`GOOGLE_IAM_SECRET`{{% /code-placeholder-key %}}:
|
||||
the Kubernetes Secret name that contains your Google IAM service account
|
||||
credentials
|
||||
- {{% code-placeholder-key %}}`GOOGLE_CREDENTIALS_KEY`{{% /code-placeholder-key %}}:
|
||||
the key inside of your Google IAM secret that contains your Google IAM account
|
||||
credentials
|
||||
|
||||
---
|
||||
|
||||
<!----------------------------- END GOOGLE CLOUD STORAGE ---------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
#### Configure the catalog database
|
||||
|
||||
The InfluxDB catalog is a PostgreSQL-compatible relational database that stores
|
||||
metadata about your time series data.
|
||||
To connect your InfluxDB cluster to your PostgreSQL-compatible database,
|
||||
provide values for the following fields in your `values.yaml`:
|
||||
|
||||
{{% note %}}
|
||||
We recommend storing sensitive credentials, such as your PostgreSQL-compatible DSN,
|
||||
as secrets in your Kubernetes cluster.
|
||||
{{% /note %}}
|
||||
|
||||
- `catalog.dsn`
|
||||
- `SecretName`: Secret name
|
||||
- `SecretKey`: Key in the secret that contains the DSN
|
||||
|
||||
{{% code-placeholders "SECRET_(NAME|KEY)" %}}
|
||||
|
||||
```yml
|
||||
catalog:
|
||||
# Secret name and key within the secret containing the dsn string to connect
|
||||
# to the catalog
|
||||
dsn:
|
||||
# Kubernetes Secret name containing the dsn for the catalog.
|
||||
SecretName: SECRET_NAME
|
||||
# The key within the Secret containing the dsn.
|
||||
SecretKey: SECRET_KEY
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`SECRET_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the secret containing your PostgreSQL-compatible DSN
|
||||
- {{% code-placeholder-key %}}`SECRET_KEY`{{% /code-placeholder-key %}}:
|
||||
Key in the secret that references your PostgreSQL-compatible DSN
|
||||
|
||||
---
|
||||
|
||||
{{% note %}}
|
||||
|
||||
##### PostgreSQL instances without TLS or SSL
|
||||
|
||||
If your PostgreSQL-compatible instance runs without TLS or SSL, you must include
|
||||
the `sslmode=disable` parameter in the DSN. For example:
|
||||
|
||||
{{% code-callout "sslmode=disable" %}}
|
||||
|
||||
```
|
||||
postgres://username:passw0rd@mydomain:5432/influxdb?sslmode=disable
|
||||
```
|
||||
|
||||
{{% /code-callout %}}
|
||||
{{% /note %}}
|
||||
|
||||
#### Configure local storage for ingesters
|
||||
|
||||
InfluxDB ingesters require local storage to store the Write Ahead Log (WAL) for
|
||||
incoming data.
|
||||
To connect your InfluxDB cluster to local storage, provide values for the
|
||||
following fields in your `values.yaml`:
|
||||
|
||||
- `ingesterStorage`
|
||||
- `storageClassName`: [Kubernetes storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
|
||||
This differs based on the Kubernetes environment and desired storage characteristics.
|
||||
- `storage`: Storage size. We recommend a minimum of 2 gibibytes (`2Gi`).
|
||||
|
||||
{{% code-placeholders "STORAGE_(CLASS|SIZE)" %}}
|
||||
|
||||
```yaml
|
||||
ingesterStorage:
|
||||
# (Optional) Set the storage class. This will differ based on the K8s
|
||||
# environment and desired storage characteristics.
|
||||
# If not set, the default storage class will be used.
|
||||
storageClassName: STORAGE_CLASS
|
||||
# Set the storage size (minimum 2Gi recommended)
|
||||
storage: STORAGE_SIZE
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`STORAGE_CLASS`{{% /code-placeholder-key %}}:
|
||||
[Kubernetes storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
|
||||
- {{% code-placeholder-key %}}`STORAGE_SIZE`{{% /code-placeholder-key %}}:
|
||||
Storage size (example: `2Gi`)
|
||||
|
||||
---
|
||||
|
||||
#### Configure your OAuth2 provider
|
||||
|
||||
InfluxDB Clustered uses OAuth2 to authenticate administrative access to your cluster.
|
||||
To connect your InfluxDB cluster to your OAuth2 provider, provide values for the
|
||||
following fields in your `values.yaml`:
|
||||
|
||||
- `admin`
|
||||
- `identityProvider`: Identity provider name.
|
||||
_If using Microsoft Entra ID (formerly Azure Active Directory), set the name
|
||||
to `azure`_.
|
||||
- `jwksEndpoint`: JWKS endpoint provided by your identity provider.
|
||||
- `users`: List of OAuth2 users to grant administrative access to your
|
||||
InfluxDB cluster. IDs are provided by your identity provider.
|
||||
|
||||
Below are examples for **Keycloak**, **Auth0**, and **Microsoft Entra ID**, but
|
||||
other OAuth2 providers should work as well:
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[Keycloak](#)
|
||||
[Auth0](#)
|
||||
[Microsoft Entra ID](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% code-callout "keycloak" "green" %}}
|
||||
{{% code-placeholders "KEYCLOAK_(HOST|REALM|USER_ID)" %}}
|
||||
|
||||
```yaml
|
||||
admin:
|
||||
# The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
|
||||
# Note for Azure Active Directory it must be exactly "azure"
|
||||
identityProvider: keycloak
|
||||
# The JWKS endpoint provided by the Identity Provider
|
||||
jwksEndpoint: |-
|
||||
https://KEYCLOAK_HOST/auth/realms/KEYCLOAK_REALM/protocol/openid-connect/certs
|
||||
# The list of users to grant access to Clustered via influxctl
|
||||
users:
|
||||
# All fields are required but `firstName`, `lastName`, and `email` can be
|
||||
# arbitrary values. However, `id` must match the user ID provided by Keycloak.
|
||||
- id: KEYCLOAK_USER_ID
|
||||
firstName: Marty
|
||||
lastName: McFly
|
||||
email: mcfly@influxdata.com
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
{{% /code-callout %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`KEYCLOAK_HOST`{{% /code-placeholder-key %}}:
|
||||
Host and port of your Keycloak server
|
||||
- {{% code-placeholder-key %}}`KEYCLOAK_REALM`{{% /code-placeholder-key %}}:
|
||||
Keycloak realm
|
||||
- {{% code-placeholder-key %}}`KEYCLOAK_USER_ID`{{% /code-placeholder-key %}}:
|
||||
Keycloak user ID to grant InfluxDB administrative access to
|
||||
|
||||
---
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% code-callout "auth0" "green" %}}
|
||||
{{% code-placeholders "AUTH0_(HOST|USER_ID)" %}}
|
||||
|
||||
```yaml
|
||||
admin:
|
||||
# The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
|
||||
# Note for Azure Active Directory it must be exactly "azure"
|
||||
identityProvider: auth0
|
||||
# The JWKS endpoint provided by the Identity Provider
|
||||
jwksEndpoint: |-
|
||||
https://AUTH0_HOST/.well-known/openid-configuration
|
||||
# The list of users to grant access to Clustered via influxctl
|
||||
users:
|
||||
- AUTH0_USER_ID
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
{{% /code-callout %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`AUTH0_HOST`{{% /code-placeholder-key %}}:
|
||||
Host and port of your Auth0 server
|
||||
- {{% code-placeholder-key %}}`AUTH0_USER_ID`{{% /code-placeholder-key %}}:
|
||||
Auth0 user ID to grant InfluxDB administrative access to
|
||||
|
||||
---
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
{{% code-callout "azure" "green" %}}
|
||||
{{% code-placeholders "AZURE_(USER|TENANT)_ID" %}}
|
||||
|
||||
```yaml
|
||||
admin:
|
||||
# The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
|
||||
# Note for Azure Active Directory it must be exactly "azure"
|
||||
identityProvider: azure
|
||||
# The JWKS endpoint provided by the Identity Provider
|
||||
jwksEndpoint: |-
|
||||
https://login.microsoftonline.com/AZURE_TENANT_ID/discovery/v2.0/keys
|
||||
# The list of users to grant access to Clustered via influxctl
|
||||
users:
|
||||
- AZURE_USER_ID
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
{{% /code-callout %}}
|
||||
|
||||
---
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`AZURE_TENANT_ID`{{% /code-placeholder-key %}}:
|
||||
Microsoft Entra tenant ID
|
||||
- {{% code-placeholder-key %}}`AZURE_USER_ID`{{% /code-placeholder-key %}}:
|
||||
Microsoft Entra user ID to grant InfluxDB administrative access to
|
||||
_(See [Find user IDs with Microsoft Entra ID](/influxdb/clustered/install/auth/?t=Microsoft+Entra+ID#find-user-ids-with-microsoft-entra-id))_
|
||||
|
||||
---
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
##### Adding users
|
||||
|
||||
Finally, add all the users you wish to have access to use `influxctl`.
|
||||
Update the `admin.users` field with a list of these users.
|
||||
<!-- See [Adding or removing users](/influxdb/clustered/admin/users/) for more details. -->
|
||||
|
||||
#### Configure the size of your cluster
|
||||
|
||||
By default, an InfluxDB cluster is configured with the following:
|
||||
|
||||
- **3 ingesters**:
|
||||
Ensures redundancy on the write path.
|
||||
- **1 compactor**:
|
||||
While you can have multiple compactors, it is more efficient to scale the
|
||||
compactor vertically (assign more CPU and memory) rather than horizontally
|
||||
(increase the number of compactors).
|
||||
- **1 querier**:
|
||||
The optimal number of queriers depends on the number of concurrent queries you are
|
||||
likely to have and how long they take to execute.
|
||||
|
||||
The default values provide a good starting point for testing.
|
||||
Once you have your cluster up and running and are looking for scaling recommendations,
|
||||
please [contact the InfluxData Support team](https://support.influxdata.com).
|
||||
We are happy to work with you to identify appropriate scale settings based on
|
||||
your anticipated workload.
|
||||
|
||||
**To use custom scale settings for your InfluxDB cluster**, modify the following fields
|
||||
in your `values.yaml`. If omitted, your cluster will use the default scale settings.
|
||||
|
||||
- `resources`
|
||||
- `ingester.requests`
|
||||
- `cpu`: CPU resource units to assign to ingesters
|
||||
- `memory`: Memory resource units to assign to ingesters
|
||||
- `replicas`: Number of ingester replicas to provision
|
||||
- `compactor.requests`
|
||||
- `cpu`: CPU resource units to assign to compactors
|
||||
- `memory`: Memory resource units to assign to compactors
|
||||
- `replicas`: Number of compactor replicas to provision
|
||||
- `querier.requests`
|
||||
- `cpu`: CPU resource units to assign to queriers
|
||||
- `memory`: Memory resource units to assign to queriers
|
||||
- `replicas`: Number of querier replicas to provision
|
||||
- `router.requests`
|
||||
- `cpu`: CPU resource units to assign to routers
|
||||
- `memory`: Memory resource units to assign to routers
|
||||
- `replicas`: Number of router replicas to provision
|
||||
|
||||
###### Related Kubernetes documentation
|
||||
|
||||
- [CPU resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu)
|
||||
- [Memory resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory)
|
||||
|
||||
{{% code-placeholders "(INGESTER|COMPACTOR|QUERIER|ROUTER)_(CPU|MEMORY|REPLICAS)" %}}
|
||||
|
||||
```yml
|
||||
# The following settings tune the various pods for their cpu/memory/replicas
|
||||
# based on workload needs. Only uncomment the specific resources you want
|
||||
# to change. Anything left commented will use the package default.
|
||||
resources:
|
||||
# The ingester handles data being written
|
||||
ingester:
|
||||
requests:
|
||||
cpu: INGESTER_CPU
|
||||
memory: INGESTER_MEMORY
|
||||
replicas: INGESTER_REPLICAS # Default is 3
|
||||
|
||||
# The compactor reorganizes old data to improve query and storage efficiency.
|
||||
compactor:
|
||||
requests:
|
||||
cpu: COMPACTOR_CPU
|
||||
memory: COMPACTOR_MEMORY
|
||||
replicas: COMPACTOR_REPLICAS # Default is 1
|
||||
|
||||
# The querier handles querying data.
|
||||
querier:
|
||||
requests:
|
||||
cpu: QUERIER_CPU
|
||||
memory: QUERIER_MEMORY
|
||||
replicas: QUERIER_REPLICAS # Default is 1
|
||||
|
||||
  # The router performs some API routing.
|
||||
router:
|
||||
requests:
|
||||
cpu: ROUTER_CPU
|
||||
memory: ROUTER_MEMORY
|
||||
replicas: ROUTER_REPLICAS # Default is 1
|
||||
```
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
### Provide a custom certificate authority bundle {note="Optional"}
|
||||
|
||||
InfluxDB attempts to make TLS connections to the services it depends on; notably
|
||||
the [Catalog](/influxdb/clustered/reference/internals/storage-engine/#catalog),
|
||||
and the [Object store](/influxdb/clustered/reference/internals/storage-engine/#object-store).
|
||||
InfluxDB validates the certificates for all of the connections it makes.
|
||||
|
||||
**If you host these services yourself and you use a private or otherwise not
|
||||
well-known certificate authority to issue certificates to these services**,
|
||||
InfluxDB will not recognize the issuer and will be unable to validate the certificates.
|
||||
To allow InfluxDB to validate these certificates, provide a PEM certificate
|
||||
bundle containing your custom certificate authority chain.
|
||||
|
||||
1. Use `kubectl` to create a config map containing your PEM bundle.
|
||||
Your certificate authority administrator should provide you with a
|
||||
PEM-formatted certificate bundle file.
|
||||
|
||||
{{% note %}}
|
||||
This PEM-formatted bundle file is *not* the certificate that InfluxDB uses to
|
||||
host its own TLS endpoints. This bundle establishes a chain of trust for the
|
||||
external services that InfluxDB depends on.
|
||||
{{% /note %}}
|
||||
|
||||
In the example below, `private_ca.pem` is the certificate bundle file.
|
||||
|
||||
```sh
|
||||
   kubectl --namespace influxdb create configmap custom-ca --from-file=ca.pem=/path/to/private_ca.pem
|
||||
```
|
||||
|
||||
{{% note %}}
|
||||
It's possible to append multiple certificates into the same bundle.
|
||||
This can help if you need to include intermediate certificates or explicitly
|
||||
include leaf certificates. Leaf certificates should be included before any
|
||||
intermediate certificates they depend on. The root certificate should
|
||||
be last in the bundle.
|
||||
{{% /note %}}
|
||||
|
||||
2. Update your `values.yaml` to enable custom egress and refer to your
|
||||
certificate authority config map. Set `useCustomEgress` to `true` and update
|
||||
the `egress` property to refer to that config map. For example:
|
||||
|
||||
```yml
|
||||
useCustomEgress: true
|
||||
egress:
|
||||
# # If you're using a custom CA you will need to specify the full custom CA bundle here.
|
||||
# #
|
||||
# # NOTE: the custom CA is currently only honoured for outbound requests used to obtain
|
||||
   # # the JWT public keys from your identity provider (see `jwksEndpoint`).
|
||||
customCertificates:
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ca.pem
|
||||
name: custom-ca
|
||||
```
|
||||
|
||||
{{< page-nav prev="/influxdb/clustered/install/auth/" prevText="Set up authentication" next="/influxdb/clustered/install/licensing" nextText="Install your license" tab="Helm" >}}
|
|
@ -23,16 +23,16 @@ following tools:
|
|||
|
||||
InfluxDB Clustered uses an `AppInstance` Kubernetes custom resource (CR) to
|
||||
configure and deploy your InfluxDB Cluster.
|
||||
Installing a `CustomResourceDefinition` (CRD) requires cluster-wide permissions and may cause `kubectl` to
|
||||
fail if you do not have those permissions in your cluster.
|
||||
Installing a `CustomResourceDefinition` (CRD) requires cluster-wide permissions
|
||||
and may cause `kubectl` to fail if you do not have those permissions in your cluster.
|
||||
|
||||
`kubectl` uses your local credentials to install the `AppInstance` CRD.
|
||||
If you do not have the necessary permissions, you can
|
||||
[use the `kubit` CLI to manually install the package in your cluster](?t=kubit#kubectl-or-kubit).
|
||||
[use the `kubit` CLI to manually install the package in your cluster](?t=kubit#kubectl-kubit-helm).
|
||||
|
||||
{{% note %}}
|
||||
**If you meet any of the following criteria,
|
||||
[install and use the `kubit` CLI](?t=kubit#kubectl-or-kubit)
|
||||
[install and use the `kubit` CLI](?t=kubit#kubectl-kubit-helm)
|
||||
on your local machine. This allows you to act as the operator would and deploy
|
||||
your cluster, but from your terminal.**
|
||||
|
||||
|
@ -44,14 +44,19 @@ your cluster, but from your terminal.**
|
|||
- You do not want to run the operator in your Kubernetes cluster.
|
||||
{{% /note %}}
|
||||
|
||||
<!-- Hidden anchor for links to the kubectl/kubit tabs -->
|
||||
You can also use [Helm](https://helm.sh/) and the
|
||||
[InfluxDB Clustered Helm chart](https://github.com/influxdata/helm-charts/tree/master/charts/influxdb3-clustered)
|
||||
to deploy your InfluxDB cluster.
|
||||
|
||||
<span id="kubectl-or-kubit"></span>
|
||||
<!-- Hidden anchor for links to the kubectl/kubit/helm tabs -->
|
||||
|
||||
<span id="kubectl-kubit-helm"></span>
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[kubectl](#)
|
||||
[kubit](#)
|
||||
[Helm](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
|
@ -91,6 +96,30 @@ for tool dependencies, meaning the required versions are tracked by `kubit`.
|
|||
|
||||
<!--------------------------------- END kubit --------------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
|
||||
<!-------------------------------- BEGIN Helm --------------------------------->
|
||||
|
||||
1. Add the InfluxData Helm chart repository:
|
||||
|
||||
```bash
|
||||
helm repo add influxdata https://helm.influxdata.com/
|
||||
```
|
||||
|
||||
2. Deploy your Helm chart using your modified local `values.yaml`:
|
||||
|
||||
```bash
|
||||
helm upgrade \
|
||||
--install \
|
||||
influxdb \
|
||||
influxdata/influxdb3-clustered \
|
||||
     -f ./values.yaml \
|
||||
--namespace influxdb
|
||||
```
|
||||
|
||||
<!--------------------------------- END Helm ---------------------------------->
|
||||
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ description: >
|
|||
Clustered software.
|
||||
menu:
|
||||
influxdb_clustered:
|
||||
name: Install your License
|
||||
name: Install your license
|
||||
parent: Install InfluxDB Clustered
|
||||
weight: 135
|
||||
influxdb/clustered/tags: [licensing]
|
||||
|
@ -47,20 +47,44 @@ To deactivate license enforcement, remove the `useLicensedBinaries` feature flag
|
|||
```
|
||||
|
||||
4. <span id="enable-feature-flag"></span>
|
||||
Update your `AppInstance` resource to enable the `useLicensedBinaries` feature flag.
|
||||
Add the `useLicensedBinaries` entry to the `.spec.package.spec.featureFlags`
|
||||
property--for example:
|
||||
Update your `AppInstance` resource to activate the `useLicensedBinaries` feature flag:
|
||||
|
||||
- If configuring the `AppInstance` resource directly, add the
|
||||
`useLicensedBinaries` entry to the `.spec.package.spec.featureFlags`
|
||||
property.
|
||||
- If using the [InfluxDB Clustered Helm chart](https://github.com/influxdata/helm-charts/tree/master/charts/influxdb3-clustered), add the `useLicensedBinaries` entry to the
|
||||
`featureFlags` property in your `values.yaml`.
|
||||
|
||||
```yml
|
||||
apiVersion: kubecfg.dev/v1alpha1
|
||||
kind: AppInstance
|
||||
# ...
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[AppInstance](#)
|
||||
[Helm](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
```yml
|
||||
apiVersion: kubecfg.dev/v1alpha1
|
||||
kind: AppInstance
|
||||
# ...
|
||||
spec:
|
||||
package:
|
||||
spec:
|
||||
package:
|
||||
spec:
|
||||
featureFlags:
|
||||
- useLicensedBinaries
|
||||
```
|
||||
featureFlags:
|
||||
- useLicensedBinaries
|
||||
```
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
```yml
|
||||
# values.yaml
|
||||
|
||||
featureFlags:
|
||||
- useLicensedBinaries
|
||||
```
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
InfluxDB Clustered detects the `License` resource and extracts the credentials
|
||||
into a secret required by InfluxDB Clustered Kubernetes pods.
|
||||
|
@ -73,8 +97,12 @@ If you are currently using a non-licensed preview release of InfluxDB Clustered
|
|||
and want to upgrade to a licensed release, do the following:
|
||||
|
||||
1. [Install an InfluxDB license](#install-your-influxdb-license)
|
||||
2. In your `myinfluxdb.yml`, update the package version defined in
|
||||
`spec.package.image` to use a licensed release.
|
||||
2. If you [use the `AppInstance` resource configuration](/influxdb/clustered/install/configure-cluster/directly/) to configure your cluster, in your `myinfluxdb.yml`,
|
||||
update the package version defined in `spec.package.image` to use a licensed
|
||||
release.
|
||||
|
||||
If using the InfluxDB Clustered Helm chart, update the `image.tag` property
|
||||
in your `values.yaml` to use a licensed release.
|
||||
|
||||
{{% warn %}}
|
||||
#### Upgrade to checkpoint releases first
|
||||
|
@ -88,6 +116,13 @@ corrupt or lost data.
|
|||
|
||||
{{% code-placeholders "PACKAGE_VERSION" %}}
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[AppInstance](#)
|
||||
[Helm](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
```yml
|
||||
apiVersion: kubecfg.dev/v1alpha1
|
||||
kind: AppInstance
|
||||
|
@ -98,6 +133,19 @@ spec:
|
|||
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:PACKAGE_VERSION
|
||||
```
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
|
||||
```yml
|
||||
# values.yaml
|
||||
|
||||
image:
|
||||
tag: PACKAGE_VERSION
|
||||
```
|
||||
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
Replace {{% code-placeholder-key %}}`PACKAGE_VERSION`{{% /code-placeholder-key %}} with
|
||||
|
@ -182,4 +230,4 @@ kubectl logs deployment/license-controller --namespace NAMESPACE
|
|||
{{% /code-placeholders %}}
|
||||
|
||||
|
||||
{{< page-nav prev="/influxdb/clustered/install/configure-cluster/" prevText="Configure your cluster" next="/influxdb/clustered/install/deploy/" nextText="Deploy your cluster" >}}
|
||||
{{< page-nav prev="/influxdb/clustered/install/configure-cluster/" prevText="Configure your cluster" next="/influxdb/clustered/install/deploy/" nextText="Deploy your cluster" keepTab=true >}}
|
||||
|
|
|
@ -9,6 +9,16 @@ menu:
|
|||
weight: 301
|
||||
---
|
||||
|
||||
{{% warn %}}
|
||||
|
||||
#### Doesn't work with InfluxDB Clustered
|
||||
|
||||
The `influxctl cluster list` command won't work with {{% product-name %}}.
|
||||
To retrieve cluster information, use the [`influxctl cluster get <CLUSTER_ID>`
|
||||
command](/influxdb/clustered/reference/cli/influxctl/cluster/get/).
|
||||
|
||||
{{% /warn %}}
|
||||
|
||||
The `influxctl cluster list` command returns information about all InfluxDB
|
||||
clusters associated with your account ID.
|
||||
|
||||
|
|
|
@ -282,16 +282,16 @@ status = None
|
|||
# Define callbacks for write responses
|
||||
def success(self, data: str):
|
||||
status = "Success writing batch: data: {data}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
def error(self, data: str, err: InfluxDBError):
|
||||
status = f"Error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
|
||||
def retry(self, data: str, err: InfluxDBError):
|
||||
status = f"Retry error writing batch: config: {self}, data: {data}, error: {err}"
|
||||
assert status.startsWith('Success'), f"Expected {status} to be success"
|
||||
assert status.startswith('Success'), f"Expected {status} to be success"
|
||||
|
||||
# Instantiate WriteOptions for batching
|
||||
write_options = WriteOptions()
|
||||
|
|
|
@ -13,6 +13,14 @@ related:
|
|||
- /influxdb/clustered/reference/sql/information-schema/
|
||||
---
|
||||
|
||||
{{% warn %}}
|
||||
Queries of InfluxDB system tables may affect production performance while
|
||||
system tables are accessed.
|
||||
|
||||
System tables are not currently part of the stable API and the schema may change
|
||||
in subsequent releases.
|
||||
{{% /warn %}}
|
||||
|
||||
InfluxDB system measurements contain time series data used by and generated from the
|
||||
InfluxDB internal monitoring system.
|
||||
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
StylesPath = "../../../.ci/vale/styles"
|
||||
|
||||
Vocab = InfluxDataDocs
|
||||
|
||||
MinAlertLevel = warning
|
||||
|
||||
Packages = Google, write-good, Hugo
|
||||
|
||||
[*.md]
|
||||
BasedOnStyles = Vale, InfluxDBv2, InfluxDataDocs, Google, write-good
|
||||
|
||||
Google.Acronyms = NO
|
||||
Google.DateFormat = NO
|
||||
Google.Ellipses = NO
|
||||
Google.Headings = NO
|
||||
Google.WordList = NO
|
||||
Vale.Spelling = NO
|
|
@ -34,6 +34,23 @@ Server configuration commands require an [Operator token](/influxdb/v2/admin/tok
|
|||
Use the [`influx server-config` command](/influxdb/v2/reference/cli/influx/server-config/)
|
||||
to retrieve your runtime server configuration.
|
||||
|
||||
<!--test: setup
|
||||
|
||||
```sh
|
||||
service influxdb start && \
|
||||
influx setup \
|
||||
--username admin \
|
||||
--password adminpassword \
|
||||
--token admintoken \
|
||||
--org influxdatadocs \
|
||||
--bucket home \
|
||||
--force && INFLUX_TOKEN=admintoken \
|
||||
```
|
||||
|
||||
-->
|
||||
|
||||
<!-- pytest-codeblocks:cont -->
|
||||
|
||||
```sh
|
||||
influx server-config
|
||||
```
|
||||
|
@ -202,10 +219,24 @@ _Typically, InfluxData internal use only._
|
|||
| `--assets-path` | `INFLUXD_ASSETS_PATH` | `assets-path` |
|
||||
|
||||
###### influxd flag
|
||||
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --assets-path=/path/to/custom/assets-dir
|
||||
```
|
||||
|
||||
<!--test-actual
|
||||
|
||||
```sh
|
||||
service influxdb stop && \
|
||||
touch /app/log/test.influxd.log && \
|
||||
influxd --assets-path=/app/assets \
|
||||
> /app/log/test.influxd.log 2>&1 &
|
||||
```
|
||||
|
||||
-->
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_ASSETS_PATH=/path/to/custom/assets-dir
|
||||
|
@ -252,6 +283,8 @@ user information, UI data, REST resources, and other key value data.
|
|||
| `--bolt-path` | `INFLUXD_BOLT_PATH` | `bolt-path` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --bolt-path=~/.influxdbv2/influxd.bolt
|
||||
```
|
||||
|
@ -298,6 +331,8 @@ InfluxData uses this endpoint in end-to-end testing.
|
|||
| `--e2e-testing` | `INFLUXD_E2E_TESTING` | `e2e-testing` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --e2e-testing
|
||||
```
|
||||
|
@ -346,6 +381,8 @@ Time-Structure Merge Tree (TSM) data on disk.
|
|||
| `--engine-path` | `INFLUXD_ENGINE_PATH` | `engine-path` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --engine-path=~/.influxdbv2/engine
|
||||
```
|
||||
|
@ -396,6 +433,8 @@ intended for internal use only.
|
|||
| `--feature-flags` | `INFLUXD_FEATURE_FLAGS` | `feature-flags` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --feature-flags flag1=value2,flag2=value2
|
||||
```
|
||||
|
@ -461,6 +500,8 @@ Include option to show detailed logs for Flux queries, including the following l
|
|||
| `--flux-log-enabled` | `INFLUXD_FLUX_LOG_ENABLED` | `flux-log-enabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --flux-log-enabled
|
||||
```
|
||||
|
@ -508,6 +549,8 @@ in InfluxDB.
|
|||
| `--hardening-enabled` | `INFLUXD_HARDENING_ENABLED` | `hardening-enabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --hardening-enabled
|
||||
```
|
||||
|
@ -556,6 +599,8 @@ Customize the URL and port for the InfluxDB API and UI.
|
|||
| `--http-bind-address` | `INFLUXD_HTTP_BIND_ADDRESS` | `http-bind-address` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --http-bind-address=:8086
|
||||
```
|
||||
|
@ -604,6 +649,8 @@ Set to `0` for no timeout.
|
|||
| `--http-idle-timeout` | `INFLUXD_HTTP_IDLE_TIMEOUT` | `http-idle-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --http-idle-timeout=3m0s
|
||||
```
|
||||
|
@ -652,6 +699,8 @@ Set to `0` for no timeout.
|
|||
| `--http-read-header-timeout` | `INFLUXD_HTTP_READ_HEADER_TIMEOUT` | `http-read-header-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --http-read-header-timeout=10s
|
||||
```
|
||||
|
@ -708,6 +757,8 @@ potentially hurt performance.
|
|||
| `--http-read-timeout` | `INFLUXD_HTTP_READ_TIMEOUT` | `http-read-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --http-read-timeout=10s
|
||||
```
|
||||
|
@ -746,7 +797,11 @@ http-read-timeout = "10s"
|
|||
---
|
||||
|
||||
### http-write-timeout
|
||||
Maximum duration the server should spend processing and responding to write requests.
|
||||
Maximum duration to wait before timing out writes of the response.
|
||||
It doesn't let Handlers decide the duration on a per-request basis.
|
||||
|
||||
The timeout is reset when a new request's header is read.
|
||||
|
||||
Set to `0` for no timeout.
|
||||
|
||||
**Default:** `0`
|
||||
|
@ -764,10 +819,23 @@ potentially hurt performance.
|
|||
| `--http-write-timeout` | `INFLUXD_HTTP_WRITE_TIMEOUT` | `http-write-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --http-write-timeout=10s
|
||||
```
|
||||
|
||||
<!--test-actual
|
||||
|
||||
```sh
|
||||
service influxdb stop && \
|
||||
touch /app/log/test.influxd.log && \
|
||||
influxd --http-write-timeout=10s \
|
||||
> /app/log/test.influxd.log 2>&1 &
|
||||
```
|
||||
|
||||
-->
|
||||
|
||||
###### Environment variable
|
||||
```sh
|
||||
export INFLUXD_HTTP_WRITE_TIMEOUT=10s
|
||||
|
@ -812,6 +880,10 @@ Maximum number of group by time buckets a `SELECT` statement can create.
|
|||
| `--influxql-max-select-buckets` | `INFLUXD_INFLUXQL_MAX_SELECT_BUCKETS` | `influxql-max-select-buckets` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --influxql-max-select-buckets=0
|
||||
```
|
||||
|
@ -861,6 +933,8 @@ InfluxDB checks the point count every second (so queries exceeding the maximum a
|
|||
| `--influxql-max-select-point` | `INFLUXD_INFLUXQL_MAX_SELECT_POINT` | `influxql-max-select-point` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --influxql-max-select-point=0
|
||||
```
|
||||
|
@ -909,6 +983,10 @@ Maximum number of series a `SELECT` statement can return.
|
|||
| `--influxql-max-select-series` | `INFLUXD_INFLUXQL_MAX_SELECT_SERIES` | `influxql-max-select-series` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --influxql-max-select-series=0
|
||||
```
|
||||
|
@ -955,11 +1033,15 @@ Identifies edge nodes during replication, and prevents collisions if two edge no
|
|||
| `--instance-id` | `INFLUXD_INSTANCE_ID` | `instance-id` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --instance-id=:8086
|
||||
```
|
||||
|
||||
###### Environment variable
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
export INFLUXD_INSTANCE_ID=:8086
|
||||
```
|
||||
|
@ -1004,6 +1086,10 @@ InfluxDB outputs log entries with severity levels greater than or equal to the l
|
|||
| `--log-level` | `INFLUXD_LOG_LEVEL` | `log-level` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --log-level=info
|
||||
```
|
||||
|
@ -1051,6 +1137,10 @@ Disable the HTTP `/metrics` endpoint which exposes [internal InfluxDB metrics](/
|
|||
| `--metrics-disabled` | `INFLUXD_METRICS_DISABLED` | `metrics-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --metrics-disabled
|
||||
```
|
||||
|
@ -1103,6 +1193,8 @@ Maximum number of bytes allowed in a NATS message payload.
|
|||
| `--nats-max-payload-bytes` | `INFLUXD_NATS_MAX_PAYLOAD_BYTES` | `nats-max-payload-bytes` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --nats-max-payload-bytes=1048576
|
||||
```
|
||||
|
@ -1155,6 +1247,8 @@ Port for the NATS streaming server. `-1` selects a random port.
|
|||
| `--nats-port` | `INFLUXD_NATS_PORT` | `nats-port` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --nats-port=-1
|
||||
```
|
||||
|
@ -1204,6 +1298,8 @@ InfluxDB without scheduling or executing tasks.
|
|||
| `--no-tasks` | `INFLUXD_NO_TASKS` | `no-tasks` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --no-tasks
|
||||
```
|
||||
|
@ -1252,6 +1348,8 @@ This endpoint provides runtime profiling data and can be helpful when debugging.
|
|||
| `--pprof-disabled` | `INFLUXD_PPROF_DISABLED` | `pprof-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --pprof-disabled
|
||||
```
|
||||
|
@ -1300,6 +1398,8 @@ Setting to `0` allows an unlimited number of concurrent queries.
|
|||
| `--query-concurrency` | `INFLUXD_QUERY_CONCURRENCY` | `query-concurrency` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --query-concurrency=10
|
||||
```
|
||||
|
@ -1347,6 +1447,8 @@ Initial bytes of memory allocated for a query.
|
|||
| `--query-initial-memory-bytes` | `INFLUXD_QUERY_INITIAL_MEMORY_BYTES` | `query-initial-memory-bytes` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --query-initial-memory-bytes=10485760
|
||||
```
|
||||
|
@ -1394,6 +1496,8 @@ Maximum total bytes of memory allowed for queries.
|
|||
| `--query-max-memory-bytes` | `INFLUXD_QUERY_MAX_MEMORY_BYTES` | `query-max-memory-bytes` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --query-max-memory-bytes=104857600
|
||||
```
|
||||
|
@ -1445,6 +1549,8 @@ Must be greater than or equal to [query-initial-memory-bytes](#query-initial-mem
|
|||
| `--query-memory-bytes` | `INFLUXD_QUERY_MEMORY_BYTES` | `query-memory-bytes` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --query-memory-bytes=10485760
|
||||
```
|
||||
|
@ -1494,6 +1600,8 @@ Setting to `0` allows an unlimited number of queries in the queue.
|
|||
| `--query-queue-size` | `INFLUXD_QUERY_QUEUE_SIZE` | `query-queue-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --query-queue-size=10
|
||||
```
|
||||
|
@ -1543,6 +1651,8 @@ information about what data is collected and how InfluxData uses it.
|
|||
| `--reporting-disabled` | `INFLUXD_REPORTING_DISABLED` | `reporting-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --reporting-disabled
|
||||
```
|
||||
|
@ -1593,6 +1703,8 @@ or in [Vault](https://www.vaultproject.io/).
|
|||
| `--secret-store` | `INFLUXD_SECRET_STORE` | `secret-store` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --secret-store=bolt
|
||||
```
|
||||
|
@ -1640,6 +1752,8 @@ Specifies the Time to Live (TTL) **in minutes** for newly created user sessions.
|
|||
| `--session-length` | `INFLUXD_SESSION_LENGTH` | `session-length` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --session-length=60
|
||||
```
|
||||
|
@ -1690,6 +1804,8 @@ and the user is redirected to the login page, even if recently active.
|
|||
| `--session-renew-disabled` | `INFLUXD_SESSION_RENEW_DISABLED` | `session-renew-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --session-renew-disabled
|
||||
```
|
||||
|
@ -1739,6 +1855,8 @@ The SQLite database is used to store metadata for notebooks and annotations.
|
|||
| `--sqlite-path` | `INFLUXD_SQLITE_PATH` | `sqlite-path` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --sqlite-path ~/.influxdbv2/influxd.sqlite
|
||||
```
|
||||
|
@ -1786,6 +1904,8 @@ Maximum size (in bytes) a shard's cache can reach before it starts rejecting wri
|
|||
| `--storage-cache-max-memory-size` | `INFLUXD_STORAGE_CACHE_MAX_MEMORY_SIZE` | `storage-cache-max-memory-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-cache-max-memory-size=1073741824
|
||||
```
|
||||
|
@ -1834,6 +1954,8 @@ and write it to a TSM file to make more memory available.
|
|||
| `--storage-cache-snapshot-memory-size` | `INFLUXD_STORAGE_CACHE_SNAPSHOT_MEMORY_SIZE` | `storage-cache-snapshot-memory-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-cache-snapshot-memory-size=26214400
|
||||
```
|
||||
|
@ -1882,6 +2004,8 @@ write it to a new TSM file if the shard hasn't received writes or deletes.
|
|||
| `--storage-cache-snapshot-write-cold-duration` | `INFLUXD_STORAGE_CACHE_SNAPSHOT_WRITE_COLD_DURATION` | `storage-cache-snapshot-write-cold-duration` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-cache-snapshot-write-cold-duration=10m0s
|
||||
```
|
||||
|
@ -1930,6 +2054,8 @@ shard if it hasn't received writes or deletes.
|
|||
| `--storage-compact-full-write-cold-duration` | `INFLUXD_STORAGE_COMPACT_FULL_WRITE_COLD_DURATION` | `storage-compact-full-write-cold-duration` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-compact-full-write-cold-duration=4h0m0s
|
||||
```
|
||||
|
@ -1977,6 +2103,8 @@ Rate limit (in bytes per second) that TSM compactions can write to disk.
|
|||
| `--storage-compact-throughput-burst` | `INFLUXD_STORAGE_COMPACT_THROUGHPUT_BURST` | `storage-compact-throughput-burst` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-compact-throughput-burst=50331648
|
||||
```
|
||||
|
@ -2027,6 +2155,8 @@ _This setting does not apply to cache snapshotting._
|
|||
| `--storage-max-concurrent-compactions` | `INFLUXD_STORAGE_MAX_CONCURRENT_COMPACTIONS` | `storage-max-concurrent-compactions` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-max-concurrent-compactions=0
|
||||
```
|
||||
|
@ -2076,6 +2206,8 @@ heap usage at the expense of write throughput.
|
|||
| `--storage-max-index-log-file-size` | `INFLUXD_STORAGE_MAX_INDEX_LOG_FILE_SIZE` | `storage-max-index-log-file-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-max-index-log-file-size=1048576
|
||||
```
|
||||
|
@ -2123,6 +2255,8 @@ Skip field size validation on incoming write requests.
|
|||
| `--storage-no-validate-field-size` | `INFLUXD_STORAGE_NO_VALIDATE_FIELD_SIZE` | `storage-no-validate-field-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-no-validate-field-size
|
||||
```
|
||||
|
@ -2170,6 +2304,8 @@ Interval of retention policy enforcement checks.
|
|||
| `--storage-retention-check-interval` | `INFLUXD_STORAGE_RETENTION_CHECK_INTERVAL` | `storage-retention-check-interval` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-retention-check-interval=30m0s
|
||||
```
|
||||
|
@ -2218,6 +2354,8 @@ all series partitions in a database.
|
|||
| `--storage-series-file-max-concurrent-snapshot-compactions` | `INFLUXD_STORAGE_SERIES_FILE_MAX_CONCURRENT_SNAPSHOT_COMPACTIONS` | `storage-series-file-max-concurrent-snapshot-compactions` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-series-file-max-concurrent-snapshot-compactions=0
|
||||
```
|
||||
|
@ -2275,6 +2413,8 @@ An increase in cache size may lead to an increase in heap usage.
|
|||
| `--storage-series-id-set-cache-size` | `INFLUXD_STORAGE_SERIES_ID_SET_CACHE_SIZE` | `storage-series-id-set-cache-size` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-series-id-set-cache-size=100
|
||||
```
|
||||
|
@ -2322,6 +2462,8 @@ The time before a shard group's end-time that the successor shard group is creat
|
|||
| `--storage-shard-precreator-advance-period` | `INFLUXD_STORAGE_SHARD_PRECREATOR_ADVANCE_PERIOD` | `storage-shard-precreator-advance-period` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-shard-precreator-advance-period=30m0s
|
||||
```
|
||||
|
@ -2369,6 +2511,8 @@ Interval of pre-create new shards check.
|
|||
| `--storage-shard-precreator-check-interval` | `INFLUXD_STORAGE_SHARD_PRECREATOR_CHECK_INTERVAL` | `storage-shard-precreator-check-interval` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-shard-precreator-check-interval=10m0s
|
||||
```
|
||||
|
@ -2416,6 +2560,8 @@ Inform the kernel that InfluxDB intends to page in mmap'd sections of TSM files.
|
|||
| `--storage-tsm-use-madv-willneed` | `INFLUXD_STORAGE_TSM_USE_MADV_WILLNEED` | `storage-tsm-use-madv-willneed` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-tsm-use-madv-willneed
|
||||
```
|
||||
|
@ -2463,6 +2609,8 @@ Validate incoming writes to ensure keys have only valid unicode characters.
|
|||
| `--storage-validate-keys` | `INFLUXD_STORAGE_VALIDATE_KEYS` | `storage-validate-keys` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-validate-keys
|
||||
```
|
||||
|
@ -2512,6 +2660,8 @@ This is useful for slower disks or when WAL write contention is present.
|
|||
| `--storage-wal-fsync-delay` | `INFLUXD_STORAGE_WAL_FSYNC_DELAY` | `storage-wal-fsync-delay` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-wal-fsync-delay=0s
|
||||
```
|
||||
|
@ -2559,6 +2709,8 @@ Maximum number writes to the WAL directory to attempt at the same time.
|
|||
| `--storage-wal-max-concurrent-writes` | `INFLUXD_STORAGE_WAL_MAX_CONCURRENT_WRITES` | `storage-wal-max-concurrent-writes` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-wal-max-concurrent-writes=0
|
||||
```
|
||||
|
@ -2608,6 +2760,8 @@ has been met. Set to `0` to disable the timeout.
|
|||
| `--storage-wal-max-write-delay` | `INFLUXD_STORAGE_WAL_MAX_WRITE_DELAY` | `storage-wal-max-write-delay` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-wal-max-write-delay=10m
|
||||
```
|
||||
|
@ -2655,6 +2809,8 @@ Maximum amount of time the storage engine will process a write request before ti
|
|||
| `--storage-write-timeout` | `INFLUXD_STORAGE_WRITE_TIMEOUT` | `storage-write-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --storage-write-timeout=10s
|
||||
```
|
||||
|
@ -2714,6 +2870,8 @@ InfluxData does not recommend using `memory` in production.
|
|||
| `--store` | `INFLUXD_STORE` | `store` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --store=bolt
|
||||
```
|
||||
|
@ -2766,6 +2924,8 @@ at least three of the following four character classes:
|
|||
| `--strong-passwords` | `INFLUXD_STRONG_PASSWORDS` | `strong-passwords` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --strong-passwords
|
||||
```
|
||||
|
@ -2814,6 +2974,8 @@ This configuration option is primarily used in continuous integration tests.
|
|||
| `--testing-always-allow-setup` | `INFLUXD_TESTING_ALWAYS_ALLOW_SETUP` | `testing-always-allow-setup` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --testing-always-allow-setup
|
||||
```
|
||||
|
@ -2862,6 +3024,8 @@ _For more information, see [Enable TLS encryption](/influxdb/v2/admin/security/e
|
|||
| `--tls-cert` | `INFLUXD_TLS_CERT` | `tls-cert` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --tls-cert=/path/to/influxdb.crt
|
||||
```
|
||||
|
@ -2910,6 +3074,8 @@ _For more information, see [Enable TLS encryption](/influxdb/v2/admin/security/e
|
|||
| `--tls-key` | `INFLUXD_TLS_KEY` | `tls-key` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --tls-key=/path/to/influxdb.key
|
||||
```
|
||||
|
@ -2957,6 +3123,8 @@ Minimum accepted TLS version.
|
|||
| `--tls-min-version` | `INFLUXD_TLS_MIN_VERSION` | `tls-min-version` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --tls-min-version=1.2
|
||||
```
|
||||
|
@ -3012,6 +3180,8 @@ Restrict accepted TLS ciphers to:
|
|||
| `--tls-strict-ciphers` | `INFLUXD_TLS_STRICT_CIPHERS` | `tls-strict-ciphers` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --tls-strict-ciphers
|
||||
```
|
||||
|
@ -3060,6 +3230,8 @@ Tracing is disabled by default.
|
|||
| `--tracing-type` | `INFLUXD_TRACING_TYPE` | `tracing-type` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --tracing-type=log
|
||||
```
|
||||
|
@ -3108,6 +3280,8 @@ The UI is enabled by default.
|
|||
| `--ui-disabled` | `INFLUXD_UI_DISABLED` | `ui-disabled` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --ui-disabled
|
||||
```
|
||||
|
@ -3154,6 +3328,8 @@ For example: `https://127.0.0.1:8200/`.
|
|||
| `--vault-addr` | `VAULT_ADDR` | `vault-addr` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-addr=https://127.0.0.1:8200/
|
||||
```
|
||||
|
@ -3201,6 +3377,8 @@ This file is used to verify the Vault server's SSL certificate.
|
|||
| `--vault-cacert` | `VAULT_CACERT` | `vault-cacert` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-cacert=/path/to/ca.pem
|
||||
```
|
||||
|
@ -3247,6 +3425,8 @@ These certificates are used to verify the Vault server's SSL certificate.
|
|||
| `--vault-capath` | `VAULT_CAPATH` | `vault-capath` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-capath=/path/to/certs/
|
||||
```
|
||||
|
@ -3293,6 +3473,8 @@ This file is used for TLS communication with the Vault server.
|
|||
| `--vault-client-cert` | `VAULT_CLIENT_CERT` | `vault-client-cert` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-client-cert=/path/to/client_cert.pem
|
||||
```
|
||||
|
@ -3339,6 +3521,8 @@ corresponds to the matching client certificate.
|
|||
| `--vault-client-key` | `VAULT_CLIENT_KEY` | `vault-client-key` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-client-key=/path/to/private_key.pem
|
||||
```
|
||||
|
@ -3387,6 +3571,8 @@ The default is 2 (for three attempts in total). Set this to 0 or less to disable
|
|||
| `--vault-max-retries` | `VAULT_MAX_RETRIES` | `vault-max-retries` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-max-retries=2
|
||||
```
|
||||
|
@ -3434,6 +3620,8 @@ Specifies the Vault client timeout.
|
|||
| `--vault-client-timeout` | `VAULT_CLIENT_TIMEOUT` | `vault-client-timeout` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-client-timeout=60s
|
||||
```
|
||||
|
@ -3483,6 +3671,8 @@ and is **not recommended**._
|
|||
| `--vault-skip-verify` | `VAULT_SKIP_VERIFY` | `vault-skip-verify` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-skip-verify
|
||||
```
|
||||
|
@ -3528,6 +3718,8 @@ Specifies the name to use as the Server Name Indication (SNI) host when connecti
|
|||
| `--vault-tls-server-name` | `VAULT_TLS_SERVER_NAME` | `vault-tls-server-name` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-tls-server-name=secure.example.com
|
||||
```
|
||||
|
@ -3573,6 +3765,8 @@ Specifies the Vault token use when authenticating with Vault.
|
|||
| `--vault-token` | `VAULT_TOKEN` | `vault-token` |
|
||||
|
||||
###### influxd flag
|
||||
<!--pytest.mark.skip-->
|
||||
|
||||
```sh
|
||||
influxd --vault-token=exAmple-t0ken-958a-f490-c7fd0eda5e9e
|
||||
```
|
||||
|
|
|
@ -5,7 +5,8 @@
|
|||
{{ $prevPage := .Site.GetPage (replaceRE `\/$` "" $prev) }}
|
||||
{{ $nextPage := .Site.GetPage (replaceRE `\/$` "" $next) }}
|
||||
{{ $keepTab := .Get "keepTab" | default false}}
|
||||
{{ $tab := .Get "tab" | default ""}}
|
||||
<div class="page-nav-btns">
|
||||
{{ if ne (len $prev) 0 }}<a class="btn prev{{if $keepTab}} keep-tab{{end}}" href="{{ $prevPage.RelPermalink }}">{{ if ne (len $prevText) 0 }}{{ $prevText }}{{ else if $prevPage.Params.list_title }}{{ $prevPage.Params.list_title }}{{ else }}{{ $prevPage.Title }}{{ end }}</a>{{ end }}
|
||||
{{ if ne (len $next) 0 }}<a class="btn next{{if $keepTab}} keep-tab{{end}}" href="{{ $nextPage.RelPermalink }}">{{ if ne (len $nextText) 0 }}{{ $nextText }}{{ else if $nextPage.Params.list_title }}{{ $nextPage.Params.list_title }}{{ else }}{{ $nextPage.Title }}{{ end }}</a>{{ end }}
|
||||
{{ if ne (len $prev) 0 }}<a class="btn prev{{if $keepTab}} keep-tab{{end}}" href="{{ $prevPage.RelPermalink }}{{ if gt (len $tab) 0}}?t={{ $tab }}{{ end }}">{{ if ne (len $prevText) 0 }}{{ $prevText }}{{ else if $prevPage.Params.list_title }}{{ $prevPage.Params.list_title }}{{ else }}{{ $prevPage.Title }}{{ end }}</a>{{ end }}
|
||||
{{ if ne (len $next) 0 }}<a class="btn next{{if $keepTab}} keep-tab{{end}}" href="{{ $nextPage.RelPermalink }}{{ if gt (len $tab) 0}}?t={{ $tab }}{{ end }}">{{ if ne (len $nextText) 0 }}{{ $nextText }}{{ else if $nextPage.Params.list_title }}{{ $nextPage.Params.list_title }}{{ else }}{{ $nextPage.Title }}{{ end }}</a>{{ end }}
|
||||
</div>
|
|
@ -43,7 +43,7 @@ pre-commit:
|
|||
tags: lint v2
|
||||
glob: "content/influxdb/v2/**/*.md"
|
||||
run: '.ci/vale/vale.sh
|
||||
--config=.vale.ini
|
||||
--config=content/influxdb/v2/.vale.ini
|
||||
--minAlertLevel=error {staged_files}'
|
||||
cloud-pytest:
|
||||
glob: content/influxdb/cloud/**/*.md
|
||||
|
@ -86,7 +86,7 @@ pre-commit:
|
|||
glob: content/influxdb/v2/**/*.md
|
||||
env:
|
||||
- SERVICE: v2-pytest
|
||||
run: docker compose run $SERVICE '{staged_files}'
|
||||
run: docker compose run --rm $SERVICE '{staged_files}'
|
||||
prettier:
|
||||
tags: frontend style
|
||||
glob: "*.{css,js,ts,jsx,tsx}"
|
||||
|
|
|
@ -87,7 +87,8 @@ function substitute_placeholders {
|
|||
/os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g;
|
||||
/os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
|
||||
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;
|
||||
s/TEST_RUN/TEST_RUN_$(date +%s)/g' \
|
||||
s/TEST_RUN/TEST_RUN_$(date +%s)/g;
|
||||
s|/path/to/custom/assets-dir|/app/custom-assets|g;' \
|
||||
$file
|
||||
|
||||
# v2-specific replacements.
|
||||
|
|
Loading…
Reference in New Issue