From ee20564750087d311427745876e48a4ba3fc0b63 Mon Sep 17 00:00:00 2001 From: Dustin Eaton Date: Tue, 5 Aug 2025 08:39:42 -0500 Subject: [PATCH 01/13] Clustered release 20250721-1796368 --- .../reference/release-notes/clustered.md | 28 + .../20250721-1796368/app-instance-schema.json | 3255 +++++++++++++++++ .../20250721-1796368/example-customer.yml | 342 ++ 3 files changed, 3625 insertions(+) create mode 100644 static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json create mode 100644 static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 9547b3546..7dca2408e 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -61,6 +61,34 @@ directory. This new directory contains artifacts associated with the specified r --- +## 20250721-1796368 {date="2025-07-21"} + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368 +``` + +#### Release artifacts +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + + +### Highlights + +#### Integral support + +InfluxQL `INTEGRAL()` function is now supported in the InfluxDB 3.0 database engine. + +### Bug Fixes + +- Fix `SHOW TABLES` timeout when a database has a large number of tables. 
+ +--- + ## 20250707-1777929 {date="2025-07-07"} ### Quickstart diff --git a/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json b/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json new file mode 100644 index 000000000..d953deceb --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json @@ -0,0 +1,3255 @@ +{ + "additionalProperties": false, + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "spec": { + "additionalProperties": false, + "properties": { + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "package": { + "properties": { + "apiVersion": { + "type": "string" + }, + "image": { + "type": "string" + }, + "spec": { + "additionalProperties": false, + "properties": { + "admin": { + "additionalProperties": false, + "description": "OAuth configuration for restricting access to Clustered", + "properties": { + "dsn": { + "additionalProperties": false, + "description": "The dsn for the postgres compatible database", + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "identityProvider": { + "description": "The identity provider to be used e.g. \"keycloak\", \"auth0\", \"azure\"", + "type": "string" + }, + "internalSigningKey": { + "description": "Internal JWT secrets", + "properties": { + "id": { + "additionalProperties": false, + "description": "random ID that uniquely identifies this keypair. Generally a UUID.", + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "privateKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. 
Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "publicKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "id", + "privateKey", + "publicKey" + ], + "type": "object" + }, + "jwksEndpoint": { + "description": "The JWKS endpoint given by your identity provider. 
This should look like \"https://{identityProviderDomain}/.well-known/jwks.json\"", + "type": "string" + }, + "users": { + "description": "The list of users to grant access to Clustered via influxctl", + "item": { + "properties": { + "email": { + "description": "The email of the user within your identity provider.", + "type": "string" + }, + "firstName": { + "description": "The first name of the user that will be used in Clustered.", + "type": "string" + }, + "id": { + "description": "The identifier of the user within your identity provider.", + "type": "string" + }, + "lastName": { + "description": "The last name of the user that will be used in Clustered.", + "type": "string" + }, + "userGroups": { + "description": "Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "firstName", + "lastName", + "email", + "id" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "catalog": { + "additionalProperties": false, + "description": "Configuration for the postgres-compatible database that is used as a catalog/metadata store", + "properties": { + "dsn": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "components": { + "additionalProperties": false, + "properties": { + "catalog": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "common": { + "additionalProperties": false, + "description": "Common configuration to all components. They will be overridden by component-specific configuration.\nAny value defined in the component-specific settings will be merged with values defined in the common settings.\n", + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "egress": { + "additionalProperties": false, + "description": "Configuration for how external resources are accessed from Clustered components", + "properties": { + "customCertificates": { + "additionalProperties": false, + "description": "Custom certificate or CA Bundle. Used to verify outbound connections performed by influxdb, such as OIDC servers,\npostgres databases, or object store API endpoints.\n\nEquivalent to the SSL_CERT_FILE environment variable used by OpenSSL.\n", + "examples": [ + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "featureFlags": { + "description": "An array of feature flag names. Feature flags (aka feature gates) control features that\nhave not yet been released. They can be experimental to varying degrees (alpha, beta, rc).\n", + "properties": { + "clusteredAuth": { + "description": "Use the authorization service optimized for Clustered deployments.\n\nThis authorization service communicates directly with the locally deployed\ngranite service, which allows it to become ready to validate access tokens\npromptly on pod start up. It also offers more control over the invalidation\nschedule for cached tokens, and may slightly reduce query latency.\n", + "type": "string" + }, + "enableDefaultResourceLimits": { + "description": "Enable Default Resource Limits for Containers\n\nWhen enabled, all containers will have `requests.cpu`, `requests.memory`,\n`limits.cpu`, and `limits.memory` defined. This is particularily useful\nfor namespaces that include a ResourceQuota. When enabling this feature\nflag, make sure to specify the resource limits and requests for the IOx\ncomponents as the defaults may not be properly sized for your cluster.\n", + "type": "string" + }, + "grafana": { + "description": "An experimental, minimal installation of a Grafana Deployment to use alongside Clustered.\n\nOnly this flag if you do not have your own metric visualisation setup and wish\nto experiment with Clustered. 
It is tested with Grafana v12.0.1-security-01.\n", + "type": "string" + }, + "localTracing": { + "description": "Experimental installation of Jaeger for tracing capabilities with InfluxDB 3.\n\nOnly enable this flag when instructed to do so by the support team.\n", + "type": "string" + }, + "noGrpcProbes": { + "description": "Remove gRPC liveness/readiness probes for debug service", + "type": "string" + }, + "noMinReadySeconds": { + "description": "Experimental flag for Kubernetes clusters that are lower than v1.25.\n\nNo longer uses minReadySeconds for workloads, this will cause downtime.\n", + "type": "string" + }, + "noPrometheus": { + "description": "Disable the install of the default bare-bones Prometheus StatefulSet installation alongside Clustered.\n\nThis feature flag is useful when you already have a monitoring setup and wish to utilise it.\n\nNOTE: In future releases, the `debug-service` will have a partial, minor, dependency on a Prometheus instance being available.\nIf you do not wish for this service to utilise your own installation of Prometheus, disabling it here may cause issues.\n", + "type": "string" + }, + "serviceMonitor": { + "description": "Deprecated. Use observability.serviceMonitor instead.\n\nCreate a ServiceMonitor resource for InfluxDB3.\n", + "type": "string" + }, + "useLicensedBinaries": { + "description": "This flag is deprecated and no longer has any effect. 
Licensed binaries are now always used.\n", + "type": "string" + } + }, + "type": "array" + }, + "hostingEnvironment": { + "additionalProperties": false, + "description": "Environment or cloud-specific configuration elements which are utilised by InfluxDB Clustered.", + "properties": { + "aws": { + "additionalProperties": false, + "description": "Configuration for hosting on AWS.", + "properties": { + "eksRoleArn": { + "default": "", + "description": "IAM role ARN to apply to the IOx ServiceAccount, used with EKS IRSA.", + "type": "string" + } + }, + "type": "object" + }, + "gke": { + "additionalProperties": false, + "description": "Configuration for hosting on Google Kubernetes Engine (GKE).", + "properties": { + "workloadIdentity": { + "additionalProperties": false, + "description": "Authentication via GKE workload identity. This will annotate the relevant Kubernetes ServiceAccount objects.\nSee https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity for further details.\n", + "properties": { + "serviceAccountEmail": { + "description": "Google IAM Service Account email, this should be in the format \"NAME@PROJECT_ID.iam.gserviceaccount.com\".", + "type": "string" + } + }, + "required": [ + "serviceAccountEmail" + ], + "type": "object" + } + }, + "type": "object" + }, + "openshift": { + "additionalProperties": false, + "description": "Configuration for hosting on Red Hat OpenShift.", + "properties": { }, + "type": "object" + } + }, + "type": "object" + }, + "images": { + "description": "Manipulate how images are retrieved for Clustered. 
This is typically useful for air-gapped environments when you need to use an internal registry.", + "properties": { + "overrides": { + "description": "Override specific images using the contained predicate fields.\n\nThis takes precedence over the registryOverride field.\n", + "item": { + "description": "Remaps an image matching naming predicates\n", + "properties": { + "name": { + "description": "Naming predicate: the part of the image name that comes after the registry name, e.g.\nIf the image name is \"oci.influxdata.com/foo/bar:1234\", the name field matches \"foo/bar\"\n", + "type": "string" + }, + "newFQIN": { + "description": "Rewrite expression: when a naming predicate matches this image, rewrite the image reference\nusing this Fully Qualified Image Name. i.e. this replaces the whole registry/imagename:tag@digest\nparts of the input image reference.\n", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "registryOverride": { + "default": "", + "description": "Place a new registry prefix infront of all Clustered component images.\n\nThis is used when you wish to maintain the original registry path for images and simply relocate them underneath\nyour own registry.\n\nExample:\nregistryOverride: 'newReg' means 'myregistry/test' becomes 'newReg/myregistry/test'\n", + "type": "string" + } + }, + "type": "object" + }, + "ingesterStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Clustered ingesters.", + "properties": { + "storage": { + "description": "A higher value provides more disk space for the Write-Ahead Log (WAL) to each ingester, allowing for a greater set of leading edge data to be maintained in-memory.\nThis also reduces the frequency of WAL rotations, leading to better query performance and less burden on the compactor.\n\nNote that at 90% capacity, an ingester will stop accepting writes in order to persist its active WAL into the configured object store as parquet files.\n", + "type": 
"string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "ingress": { + "additionalProperties": false, + "description": "Configuration for how Clustered components are accessed.", + "properties": { + "grpc": { + "additionalProperties": false, + "description": "Configuration for components which utilise gRPC", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "hosts": { + "description": "A number of hosts/domains to use as entrypoints within the Ingress resources.", + "type": "array" + }, + "http": { + "additionalProperties": false, + "description": "Configuration for components which utilise HTTP", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "description": "Template to apply across configured Ingress-type resources.\nThis allows you to specify a range of third party annotations onto the created Ingress objects and/or\nalter the kind of Ingress you would like to use, e.g. 'Route'.\n", + "oneOf": [ + { + "properties": { + "apiVersion": { + "const": "networking.istio.io/v1beta1" + }, + "kind": { + "const": "Gateway" + }, + "selector": { + "default": { }, + "description": "This selector determines which Istio ingress gateway pods will be chosen\nto handle traffic for the created Gateway resources. 
A blank selector means that all\ngateway pods in the cluster will handle traffic.\n\nFor more details, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway\n", + "type": "object" + } + }, + "required": [ + "apiVersion", + "kind" + ] + }, + { + "properties": { + "apiVersion": { + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1" + ], + "type": "string" + }, + "kind": { + "enum": [ + "Ingress", + "Route" + ], + "type": "string" + } + } + } + ], + "properties": { + "apiVersion": { + "default": "networking.k8s.io/v1", + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1", + "networking.istio.io/v1beta1" + ], + "type": "string" + }, + "kind": { + "default": "Ingress", + "enum": [ + "Ingress", + "Route", + "Gateway" + ], + "type": "string" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations to place onto the objects which enable ingress.", + "type": "object" + } + }, + "type": "object" + }, + "selector": { + "description": "Selector to specify which gateway deployment utilises the configured ingress configuration.\n\nNote that this is only for Istio Gateway, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway for further details\n", + "type": "object" + } + }, + "type": "object" + }, + "tlsSecretName": { + "default": "", + "description": "Kubernetes Secret name which contains TLS certificates.\n\nIf you are using cert-manager, this is the name of the Secret to create containing certificates.\nNote that cert-manager is externally managed and is not apart of a Clustered configuration.\n", + "type": "string" + } + }, + "type": "object" + }, + "monitoringStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Prometheus instance shipped alongside Clustered for basic monitoring purposes.", + "properties": { + "storage": { + "description": "The amount of storage to provision for the 
attached volume, e.g. \"10Gi\".", + "type": "string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "objectStore": { + "additionalProperties": false, + "description": "Configuration for the backing object store of IOx.", + "oneOf": [ + { + "required": [ + "bucket", + "region" + ] + }, + { + "required": [ + "s3", + "bucket" + ] + }, + { + "required": [ + "azure", + "bucket" + ] + }, + { + "required": [ + "google", + "bucket" + ] + } + ], + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "default": "false", + "type": "string" + }, + "azure": { + "additionalProperties": false, + "description": "Configuration for Azure Blob Storage.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "account": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "accessKey", + "account" + ], + "type": "object" + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "default": "", + "type": "string" + }, + "google": { + "additionalProperties": false, + "description": "Configuration for Google Cloud Storage.", + "properties": { + "serviceAccountSecret": { + "additionalProperties": false, + "description": "Authentication via Google IAM Service Account credentials file using a Kubernetes Secret name and key.\nSee https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform for further details.\n\nIf you wish to use GKE IAM annotations, refer to the hostingEnviornment section of the schema.\n", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "region": { + "default": "", + "description": "The region in which the bucket resides. This may not be required dependent on your object store provider.", + "type": "string" + }, + "s3": { + "additionalProperties": false, + "description": "Configuration for AWS S3 (compatible) object stores.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "description": "Allow the S3 client to accept insecure HTTP, as well as HTTPS connections to object store.", + "type": "string" + }, + "endpoint": { + "default": "", + "description": "S3 bucket region, see https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region for further details.", + "type": "string" + }, + "region": { + "description": "AWS region for the bucket, such as us-east-1.", + "type": "string" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "region" + ], + "type": "object" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "observability": { + "additionalProperties": false, + "default": { }, + "description": "Configuration for gaining operational insight into Clustered components", + "properties": { + "retention": { + "default": "12h", + "description": "The retention period for prometheus", + "type": "string" + }, + "serviceMonitor": { + "additionalProperties": false, + "description": "Configure a ServiceMonitor resource to easily expose InfluxDB metrics via the Prometheus Operator.\nSee the Prometheus Operator documentation for usage:\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md\n", + "properties": { + "fallbackScrapeProtocol": { + "default": null, + "description": "Specifies which protocol to use when scraping endpoints that return a blank or invalid Content-Type header.\n\nRequired for Prometheus v3.0.0+ only, which enforces Content-Type validation (unlike v2).\n\nFor most standard Prometheus metrics endpoints, including InfluxDB, use \"PrometheusText0.0.4\".\n", + "type": "string" + }, + "interval": { + "default": "30s", + "description": "A duration string that controls the length of time between scrape attempts, ex: '15s', or '1m'", + "type": "string" + }, + "scrapeTimeout": { + "default": null, + "description": "A duration string that controls the scrape timeout duration, ex: '10s'", + "type": "string" + } + }, + "required": [ ], + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "additionalProperties": false, + "properties": { + "catalog": 
{ + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "4", + "type": "string" + }, + "memory": { + "default": "16Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 1, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the 
maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + }, + "replicas": { + "const": 1, + "description": "Replica configuration for the Garbage Collector.\nNOTE: This component does not support horizontal scaling at this time.\nRefer to https://docs.influxdata.com/influxdb/clustered/reference/internals/storage-engine/#garbage-collector-scaling-strategies\nfor more details.\n", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "0.5", + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + 
"additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "6", + "type": "string" + }, + "memory": { + "default": "24Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "prometheus": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "500m", + "type": "string" + }, + "memory": { + "default": "512Mi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + 
}, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "1", + "type": "string" + }, + "memory": { + "default": "2Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "catalog", + "objectStore", + "ingesterStorage", + "monitoringStorage" + ], + "type": "object" + } + }, + "required": [ + "image", + "apiVersion" + ], + "type": "object" + }, + "pause": { + "default": false, + "type": "boolean" + } + }, + "type": "object" + }, + "status": { + "additionalProperties": true, + "type": "object" + } + }, + "type": "object" +} + diff --git a/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml b/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml new file mode 100644 index 000000000..0484330ed --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml @@ -0,0 +1,342 
@@ +# yaml-language-server: $schema=app-instance-schema.json +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +metadata: + name: influxdb + namespace: influxdb +spec: + # One or more secrets that are used to pull the images from an authenticated registry. + # This will either be the secret provided to you, if using our registry, or a secret for your own registry + # if self-hosting the images. + imagePullSecrets: + - name: + package: + # The version of the clustered package that will be used. + # This determines the version of all of the individual components. + # When a new version of the product is released, this version should be updated and any + # new config options should be updated below. + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368 + apiVersion: influxdata.com/v1alpha1 + spec: + # # Provides a way to pass down hosting environment specific configuration, such as a role ARN when using EKS IRSA. + # # This section contains three mutually-exclusive "blocks". Uncomment the block named after the hosting environment + # # you run: "aws", "openshift" or "gke". + # hostingEnvironment: + # # # Uncomment this block if you're running in EKS. + # # aws: + # # eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role' + # # + # # # Uncomment this block if you're running inside OpenShift. + # # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object + # # # as a marker that you're choosing OpenShift as hosting environment. + # # openshift: {} + # # + # # # Uncomment this block if you're running in GKE: + # # gke: + # # # Authenticate to Google Cloud services via workload identity, this + # # # annotates the 'iox' ServiceAccount with the role name you specify. + # # # NOTE: This setting just enables GKE specific authentication mechanism, + # # # You still need to enable `spec.objectStore.google` below if you want to use GCS. 
+ # # workloadIdentity: + # # # Google Service Account name to use for the workload identity. + # # serviceAccountEmail: @.iam.gserviceaccount.com + catalog: + # A postgresql style DSN that points at a postgresql compatible database. + # eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...] + dsn: + valueFrom: + secretKeyRef: + name: + key: + + # images: + # # This can be used to override a specific image name with its FQIN + # # (Fully Qualified Image Name) for testing. eg. + # overrides: + # - name: influxdb2-artifacts/iox/iox + # newFQIN: mycompany/test-iox-build:aninformativetag + # + # # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images. + # # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest + # registryOverride: + + objectStore: + # Bucket that the parquet files will be stored in + bucket: + + # Uncomment one of the following (s3, azure) + # to enable the configuration of your object store + s3: + # URL for S3 Compatible object store + endpoint: + + # Set to true to allow communication over HTTP (instead of HTTPS) + allowHttp: "false" + + # S3 Access Key + # This can also be provided as a valueFrom: secretKeyRef: + accessKey: + value: + + # S3 Secret Key + # This can also be provided as a valueFrom: secretKeyRef: + secretKey: + value: + + # This value is required for AWS S3, it may or may not be required for other providers. + region: + + # azure: + # Azure Blob Storage Access Key + # This can also be provided as a valueFrom: secretKeyRef: + # accessKey: + # value: + + # Azure Blob Storage Account + # This can also be provided as a valueFrom: secretKeyRef: + # account: + # value: + + # There are two main ways you can access a Google: + # + # a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section. 
+ # b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here + # + # If you pick (a) you may not need to uncomment anything else in this section, + # but you still need to tell influxdb that you intend to use Google Cloud Storage. + # so you need to specify an empty object. Uncomment the following line: + # + # google: {} + # + # + # If you pick (b), uncomment the following block: + # + # google: + # # If you're authenticating to Google Cloud service using a Service Account credentials file, as opposed + # # to using workload identity (see above) you need to provide a reference to a k8s secret containing the credentials file. + # serviceAccountSecret: + # # Kubernetes Secret name containing the credentials for a Google IAM Service Account. + # name: + # # The key within the Secret containing the credentials. + # key: + + # Parameters to tune observability configuration, such as Prometheus ServiceMonitor's. + observability: {} + # retention: 12h + # serviceMonitor: + # interval: 10s + # scrapeTimeout: 30s + + # Ingester pods have a volume attached. + ingesterStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 2Gi recommended) + storage: + + # Monitoring pods have a volume attached. + monitoringStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 10Gi recommended) + storage: + + # Uncomment the following block if using our provided Ingress. 
+ # + # We currently only support the NGINX ingress controller: https://github.com/kubernetes/ingress-nginx + # + # ingress: + # hosts: + # # This is the host on which you will access Influxdb 3.0, for both reads and writes + # - + + # (Optional) + # The name of the Kubernetes Secret containing a TLS certificate, this should exist in the same namespace as the Clustered installation. + # If you are using cert-manager, enter a name for the Secret it should create. + # tlsSecretName: + + # http: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + + # grpc: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + # + # Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations + # onto those objects, this is useful for third party software in your environment, such as cert-manager. + # template: + # apiVersion: 'route.openshift.io/v1' + # kind: 'Route' + # metadata: + # annotations: + # 'example-annotation': 'annotation-value' + + # Enables specifying customizations for the various components in InfluxDB 3.0. + # components: + # # router: + # # template: + # # containers: + # # iox: + # # env: + # # INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000" + # # nodeSelector: + # # disktype: ssd + # # tolerations: + # # - effect: NoSchedule + # # key: example + # # operator: Exists + # # Common customizations for all components go in a pseudo-component called "common" + # # common: + # # template: + # # # Metadata contains custom annotations (and labels) to be added to a component. 
E.g.: + # # metadata: + # # annotations: + # # telegraf.influxdata.com/class: "foo" + + # Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # nodeAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Node must have these labels to be considered for scheduling + # # nodeSelectorTerms: + # # - matchExpressions: + # # - key: required + # # operator: In + # # values: + # # - ssd + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer nodes with these labels but they're not required + # # - weight: 1 + # # preference: + # # matchExpressions: + # # - key: preferred + # # operator: In + # # values: + # # - postgres + + # Example of setting podAntiAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # podAntiAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there + # # - labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer not to schedule pods together but may do so if necessary + # # - weight: 1 + # # podAffinityTerm: + # # labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + + # Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs. + # Only uncomment the specific resources you want to change, anything uncommented will use the package default. 
+ # (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # + # resources: + # # The ingester handles data being written + # ingester: + # requests: + # cpu: + # memory: + # replicas: # The default for ingesters is 3 to increase availability + # + # # optionally you can specify the resource limits which improves isolation. + # # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # # limits: + # # cpu: + # # memory: + + # # The compactor reorganizes old data to improve query and storage efficiency. + # compactor: + # requests: + # cpu: + # memory: + # replicas: # the default is 1 + + # # The querier handles querying data. + # querier: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + # # The router performs some api routing. + # router: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + admin: + # The list of users to grant access to Clustered via influxctl + users: + # First name of user + - firstName: + # Last name of user + lastName: + # Email of user + email: + # The ID that the configured Identity Provider uses for the user in oauth flows + id: + # Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member + userGroups: + - + + # The dsn for the postgres compatible database (note this is the same as defined above) + dsn: + valueFrom: + secretKeyRef: + name: + key: + # The identity provider to be used e.g. 
"keycloak", "auth0", "azure", etc + # Note for Azure Active Directory it must be exactly "azure" + identityProvider: + # The JWKS endpoint provided by the Identity Provider + jwksEndpoint: + + # # This (optional) section controls how InfluxDB issues outbound requests to other services + # egress: + # # If you're using a custom CA you will need to specify the full custom CA bundle here. + # # + # # NOTE: the custom CA is currently only honoured for outbound requests used to obtain + # # the JWT public keys from your identity provider (see `jwksEndpoint`). + # customCertificates: + # valueFrom: + # configMapKeyRef: + # key: ca.pem + # name: custom-ca + + # We also include the ability to enable some features that are not yet ready for general availability + # or for which we don't yet have a proper place to turn on an optional feature in the configuration file. + # To turn on these you should include the name of the feature flag in the `featureFlag` array. + # + # featureFlags: + # # Uncomment to install a Grafana deployment. + # # Depends on one of the prometheus features being deployed. + # # - grafana + + # # The following 2 flags should be uncommented for k8s API 1.21 support. + # # Note that this is an experimental configuration. 
+ # # - noMinReadySeconds + # # - noGrpcProbes From a38dd87629825c0265e88ab989e8ae118500d137 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 5 Aug 2025 09:06:07 -0500 Subject: [PATCH 02/13] Update content/influxdb3/clustered/reference/release-notes/clustered.md --- .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 7dca2408e..a82fa5c9b 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -79,7 +79,7 @@ spec: ### Highlights -#### Integral support +#### Support for InfluxQL INTEGRAL() InfluxQL `INTEGRAL()` function is now supported in the InfluxDB 3.0 database engine. From 2e12fa79c32a4616cfd44a7b962d026de01e2dca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 17:14:39 +0000 Subject: [PATCH 03/13] chore(deps): bump tmp from 0.2.3 to 0.2.4 Bumps [tmp](https://github.com/raszi/node-tmp) from 0.2.3 to 0.2.4. - [Changelog](https://github.com/raszi/node-tmp/blob/master/CHANGELOG.md) - [Commits](https://github.com/raszi/node-tmp/compare/v0.2.3...v0.2.4) --- updated-dependencies: - dependency-name: tmp dependency-version: 0.2.4 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index 65e19bf64..00856c0cd 100644 --- a/yarn.lock +++ b/yarn.lock @@ -5046,9 +5046,9 @@ tldts@^6.1.32: tldts-core "^6.1.86" tmp@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae" - integrity sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w== + version "0.2.4" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.4.tgz#c6db987a2ccc97f812f17137b36af2b6521b0d13" + integrity sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ== to-buffer@^1.1.1: version "1.2.1" From 407a5e6de688a92e8e24eca61117da29812192c5 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 5 Aug 2025 15:50:13 -0500 Subject: [PATCH 04/13] 1. Updated Core serve.md Options Section - Updated /content/influxdb3/core/reference/cli/influxdb3/serve.md with 84 Core-compatible options - Excluded 21 Enterprise-only options (like cluster-id, license-email, license-file, mode, compaction options, clustering options, etc.) - All options now link to the Core configuration documentation - Fixed the environment variables link to point to Core instead of Enterprise 2. Updated Enterprise serve.md Options Section - Updated /content/influxdb3/enterprise/reference/cli/influxdb3/serve.md with all 105 configuration options - Includes both Core and Enterprise-only options - Maintains proper required option markers for node-id and cluster-id - All options link to the Enterprise configuration documentation 3. 
Verified Content Consistency - Core serve.md: Contains examples without cluster-id parameter (appropriate for Core) - Enterprise serve.md: Contains examples with both node-id and cluster-id parameters, plus Enterprise-specific mode examples - Both maintain consistent structure and troubleshooting sections appropriate to their respective products - Environment variables sections correctly reference their respective configuration documentation 4. Allow Vale to accept "parquet" in lowercase when it appears in: - Command-line options (e.g., --parquet-mem-cache-size) - Hyphenated configuration names (e.g., parquet-mem-cache-prune-interval) - Code blocks or inline code (e.g., `parquet`) Key Changes Made: - Core: Now includes 84 options (was 69), excluding Enterprise-only features - Enterprise: Now includes all 105 options (was 78), comprehensive coverage - Alphabetical ordering: Both option tables are now properly alphabetized - New options added: Many previously missing options like buffer-mem-limit-mb, tcp-listener-file-path, telemetry-*, wal-replay-*, etc. 
- Proper segregation: Core users no longer see Enterprise-only configuration options - Vale allows Parquet or parquet in the appropriate context --- .../vocabularies/InfluxDataDocs/accept.txt | 2 +- .../reference/cli/influxdb3/delete/_index.md | 6 +- .../reference/cli/influxdb3/delete/token.md | 18 + .../core/reference/cli/influxdb3/serve.md | 110 +- .../core/reference/config-options.md | 1051 +--------- .../reference/cli/influxdb3/delete/_index.md | 6 +- .../reference/cli/influxdb3/delete/token.md | 18 + .../reference/cli/influxdb3/serve.md | 30 + .../enterprise/reference/config-options.md | 352 +++- .../shared/influxdb3-cli/config-options.md | 1690 +++++++++++++++++ content/shared/influxdb3-cli/delete/_index.md | 4 +- content/shared/influxdb3-cli/delete/token.md | 32 + 12 files changed, 2213 insertions(+), 1106 deletions(-) create mode 100644 content/influxdb3/core/reference/cli/influxdb3/delete/token.md create mode 100644 content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md create mode 100644 content/shared/influxdb3-cli/config-options.md create mode 100644 content/shared/influxdb3-cli/delete/token.md diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 1ebaf7d46..a56077021 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -31,7 +31,7 @@ LogicalPlan [Mm]onitor MBs? PBs? 
-Parquet +Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*` Redoc SQLAlchemy SQLAlchemy diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md index d15e985a9..954678c45 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. menu: influxdb3_core: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/token.md b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md new file mode 100644 index 000000000..5a7caf3c0 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,18 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. 
+influxdb3/core/tags: [cli] +menu: + influxdb3_core: + parent: influxdb3 delete +weight: 201 +related: + - /influxdb3/core/admin/tokens/ + - /influxdb3/core/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference +source: /shared/influxdb3-cli/delete/token.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index ffc7fe5af..debeb8ddc 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -36,41 +36,23 @@ influxdb3 serve [OPTIONS] --node-id | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ | | | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | -| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | -| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ | +| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-access-key-id)_ | -| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | +| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration 
options](/influxdb3/core/reference/config-options/#aws-default-region)_ | | | `--aws-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-endpoint)_ | +| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | | | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ | -| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ | -| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | -| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | -| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | -| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | -| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | -| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | -| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | -| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print 
detailed help information | -| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | -| `-v` | `--verbose` | Enable verbose output | -| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | -| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | -| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | -| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | -| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | -| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | -| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | -| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | -| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | -| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | +| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--data-dir` | _See [configuration 
options](/influxdb3/core/reference/config-options/#data-dir)_ | +| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | +| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | | | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ | | | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ | @@ -78,29 +60,67 @@ influxdb3 serve [OPTIONS] --node-id | | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | | | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ | -| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | +| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration 
options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ | -| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | -| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | -| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | -| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | -| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | -| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | -| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | -| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | -| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | -| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | -| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | -| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | -| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration 
options](/influxdb3/core/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ | -| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#distinct-cache-eviction-interval)_ | -| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/core/reference/config-options/#force-snapshot-mem-threshold)_ | -| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-lookback-duration)_ | +| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#hard-delete-default-duration)_ | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | +| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | +| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | +| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | +| 
| `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | +| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | +| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | +| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | +| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | +| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | +| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | +| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | +| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | | | `--package-manager` | _See [configuration options](/influxdb3/core/reference/config-options/#package-manager)_ | +| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | +| | `--parquet-mem-cache-query-path-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-query-path-duration)_ | +| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | +| | `--plugin-dir` | _See [configuration 
options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--preemptive-cache-age` | _See [configuration options](/influxdb3/core/reference/config-options/#preemptive-cache-age)_ | | | `--query-file-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#query-file-limit)_ | +| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#retention-check-interval)_ | +| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ | +| | `--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-minimum-version)_ | +| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | +| | `--traces-exporter-jaeger-agent-host` | _See [configuration 
options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | +| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | +| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | +| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | +| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | +| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | +| `-v` | `--verbose` | Enable verbose output | +| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | +| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-fail-on-error)_ | +| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/core/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* Required 
options" >}} @@ -110,7 +130,7 @@ influxdb3 serve [OPTIONS] --node-id You can use environment variables to define most `influxdb3 serve` options. For more information, see -[Configuration options](/influxdb3/enterprise/reference/config-options/). +[Configuration options](/influxdb3/core/reference/config-options/). ## Examples diff --git a/content/influxdb3/core/reference/config-options.md b/content/influxdb3/core/reference/config-options.md index 6914536eb..11e29f06c 100644 --- a/content/influxdb3/core/reference/config-options.md +++ b/content/influxdb3/core/reference/config-options.md @@ -8,1052 +8,9 @@ menu: parent: Reference name: Configuration options weight: 100 +source: /shared/influxdb3-cli/config-options.md --- -{{< product-name >}} lets you customize your server configuration by using -`influxdb3 serve` command options or by setting environment variables. - -## Configure your server - -Pass configuration options to the `influxdb serve` server using either command -options or environment variables. Command options take precedence over -environment variables. 
- -##### Example influxdb3 serve command options - - - -```sh -influxdb3 serve \ - --object-store file \ - --data-dir ~/.influxdb3 \ - --node-id NODE_ID \ - --log-filter info \ - --max-http-request-size 20971520 \ - --aws-allow-http -``` - -##### Example environment variables - - - -```sh -export INFLUXDB3_OBJECT_STORE=file -export INFLUXDB3_DB_DIR=~/.influxdb3 -export INFLUXDB3_WRITER_IDENTIFIER_PREFIX=my-host -export LOG_FILTER=info -export INFLUXDB3_MAX_HTTP_REQUEST_SIZE=20971520 -export AWS_ALLOW_HTTP=true - -influxdb3 serve -``` - -## Server configuration options - -- [General](#general) - - [object-store](#object-store) - - [data-dir](#data-dir) - - [node-id](#node-id) - - [query-file-limit](#query-file-limit) -- [AWS](#aws) - - [aws-access-key-id](#aws-access-key-id) - - [aws-secret-access-key](#aws-secret-access-key) - - [aws-default-region](#aws-default-region) - - [aws-endpoint](#aws-endpoint) - - [aws-session-token](#aws-session-token) - - [aws-allow-http](#aws-allow-http) - - [aws-skip-signature](#aws-skip-signature) -- [Google Cloud Service](#google-cloud-service) - - [google-service-account](#google-service-account) -- [Microsoft Azure](#microsoft-azure) - - [azure-storage-account](#azure-storage-account) - - [azure-storage-access-key](#azure-storage-access-key) -- [Object Storage](#object-storage) - - [bucket](#bucket) - - [object-store-connection-limit](#object-store-connection-limit) - - [object-store-http2-only](#object-store-http2-only) - - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) - - [object-store-max-retries](#object-store-max-retries) - - [object-store-retry-timeout](#object-store-retry-timeout) - - [object-store-cache-endpoint](#object-store-cache-endpoint) -- [Logs](#logs) - - [log-filter](#log-filter) - - [log-destination](#log-destination) - - [log-format](#log-format) - - [query-log-size](#query-log-size) -- [Traces](#traces) - - [traces-exporter](#traces-exporter) - - 
[traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) - - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) - - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) - - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) - - [traces-jaeger-debug-name](#traces-jaeger-debug-name) - - [traces-jaeger-tags](#traces-jaeger-tags) - - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) -- [DataFusion](#datafusion) - - [datafusion-num-threads](#datafusion-num-threads) - - [datafusion-runtime-type](#datafusion-runtime-type) - - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) - - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) - - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) - - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) - - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) - - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) - - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) - - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) - - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) - - [datafusion-config](#datafusion-config) -- [HTTP](#http) - - [max-http-request-size](#max-http-request-size) - - [http-bind](#http-bind) - - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) -- [Memory](#memory) - - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) -- [Write-Ahead Log (WAL)](#write-ahead-log-wal) - - [wal-flush-interval](#wal-flush-interval) - - [wal-snapshot-size](#wal-snapshot-size) - - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - - 
[snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Compaction](#compaction) - - [gen1-duration](#gen1-duration) -- [Caching](#caching) - - [preemptive-cache-age](#preemptive-cache-age) - - [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) - - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - - [last-cache-eviction-interval](#last-cache-eviction-interval) - - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) -- [Processing engine](#processing-engine) - - [plugin-dir](#plugin-dir) - - [virtual-env-location](#virtual-env-location) - - [package-manager](#package-manager) - ---- - -### General - -- [object-store](#object-store) -- [data-dir](#data-dir) -- [node-id](#node-id) -- [query-file-limit](#query-file-limit) - -#### object-store - -Specifies which object storage to use to store Parquet files. -This option supports the following values: - -- `memory` -- `memory-throttled` -- `file` -- `s3` -- `google` -- `azure` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--object-store` | `INFLUXDB3_OBJECT_STORE` | - ---- - -#### data-dir - -For the `file` object store, defines the location {{< product-name >}} uses to store files locally. -Required when using the `file` [object store](#object-store). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--data-dir` | `INFLUXDB3_DB_DIR` | - ---- - -#### node-id - -Specifies the node identifier used as a prefix in all object store file paths. -Use a unique node identifier for each host sharing the same object store -configuration--for example, the same bucket. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | - ---- - -#### query-file-limit - -Limits the number of Parquet files a query can access. - -**Default:** `432` - -With the default `432` setting and the default [`gen1-duration`](#gen1-duration) -setting of 10 minutes, queries can access up to a 72 hours of data, but -potentially less depending on whether all data for a given 10 minute block of -time was ingested during the same period. - -You can increase this limit to allow more files to be queried, but be aware of -the following side-effects: - -- Degraded query performance for queries that read more Parquet files -- Increased memory usage -- Your system potentially killing the `influxdb3` process due to Out-of-Memory - (OOM) errors -- If using object storage to store data, many GET requests to access the data - (as many as 2 per file) - -> [!Note] -> We recommend keeping the default setting and querying smaller time ranges. -> If you need to query longer time ranges or faster query performance on any query -> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/) -> optimizes data storage by compacting and rearranging Parquet files to achieve -> faster query performance. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | - ---- - -### AWS - -- [aws-access-key-id](#aws-access-key-id) -- [aws-secret-access-key](#aws-secret-access-key) -- [aws-default-region](#aws-default-region) -- [aws-endpoint](#aws-endpoint) -- [aws-session-token](#aws-session-token) -- [aws-allow-http](#aws-allow-http) -- [aws-skip-signature](#aws-skip-signature) - -#### aws-access-key-id - -When using Amazon S3 as the object store, set this to an access key that has -permission to read from and write to the specified S3 bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | - ---- - -#### aws-secret-access-key - -When using Amazon S3 as the object store, set this to the secret access key that -goes with the specified access key ID. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | - ---- - -#### aws-default-region - -When using Amazon S3 as the object store, set this to the region that goes with -the specified bucket if different from the fallback value. - -**Default:** `us-east-1` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-default-region` | `AWS_DEFAULT_REGION` | - ---- - -#### aws-endpoint - -When using an Amazon S3 compatibility storage service, set this to the endpoint. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-endpoint` | `AWS_ENDPOINT` | - ---- - -#### aws-session-token - -When using Amazon S3 as an object store, set this to the session token. This is -handy when using a federated login or SSO and fetching credentials via the UI. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-session-token` | `AWS_SESSION_TOKEN` | - ---- - -#### aws-allow-http - -Allows unencrypted HTTP connections to AWS. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-allow-http` | `AWS_ALLOW_HTTP` | - ---- - -#### aws-skip-signature - -If enabled, S3 object stores do not fetch credentials and do not sign requests. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | - ---- - -### Google Cloud Service - -- [google-service-account](#google-service-account) - -#### google-service-account - -When using Google Cloud Storage as the object store, set this to the path to the -JSON file that contains the Google credentials. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :----------------------- | -| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | - ---- - -### Microsoft Azure - -- [azure-storage-account](#azure-storage-account) -- [azure-storage-access-key](#azure-storage-access-key) - -#### azure-storage-account - -When using Microsoft Azure as the object store, set this to the name you see -when navigating to **All Services > Storage accounts > `[name]`**. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | - ---- - -#### azure-storage-access-key - -When using Microsoft Azure as the object store, set this to one of the Key -values in the Storage account's **Settings > Access keys**. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | - ---- - -### Object Storage - -- [bucket](#bucket) -- [object-store-connection-limit](#object-store-connection-limit) -- [object-store-http2-only](#object-store-http2-only) -- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) -- [object-store-max-retries](#object-store-max-retries) -- [object-store-retry-timeout](#object-store-retry-timeout) -- [object-store-cache-endpoint](#object-store-cache-endpoint) - -#### bucket - -Sets the name of the object storage bucket to use. Must also set -`--object-store` to a cloud object storage for this option to take effect. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--bucket` | `INFLUXDB3_BUCKET` | - ---- - -#### object-store-connection-limit - -When using a network-based object store, limits the number of connections to -this value. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :------------------------------ | -| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | - ---- - -#### object-store-http2-only - -Forces HTTP/2 connections to network-based object stores. - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :------------------------ | -| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | - ---- - -#### object-store-http2-max-frame-size - -Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | - ---- - -#### object-store-max-retries - -Defines the maximum number of times to retry a request. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | - ---- - -#### object-store-retry-timeout - -Specifies the maximum length of time from the initial request after which no -further retries are be attempted. - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------- | -| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | - ---- - -#### object-store-cache-endpoint - -Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. - -| influxdb3 serve option | Environment variable | -| :------------------------------ | :---------------------------- | -| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | - ---- - -### Logs - -- [log-filter](#log-filter) -- [log-destination](#log-destination) -- [log-format](#log-format) -- [query-log-size](#query-log-size) - -#### log-filter - -Sets the filter directive for logs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-filter` | `LOG_FILTER` | - ---- - -#### log-destination - -Specifies the destination for logs. - -**Default:** `stdout` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-destination` | `LOG_DESTINATION` | - ---- - -#### log-format - -Defines the message format for logs. - -This option supports the following values: - -- `full` _(default)_ - -**Default:** `full` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-format` | `LOG_FORMAT` | - ---- - -#### query-log-size - -Defines the size of the query log. Up to this many queries remain in the -log before older queries are evicted to make room for new ones. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | - ---- - -### Traces - -- [traces-exporter](#traces-exporter) -- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) -- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) -- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) -- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) -- [traces-jaeger-debug-name](#traces-jaeger-debug-name) -- [traces-jaeger-tags](#traces-jaeger-tags) -- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) - -#### traces-exporter - -Sets the type of tracing exporter. - -**Default:** `none` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--traces-exporter` | `TRACES_EXPORTER` | - ---- - -#### traces-exporter-jaeger-agent-host - -Specifies the Jaeger agent network hostname for tracing. - -**Default:** `0.0.0.0` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | - ---- - -#### traces-exporter-jaeger-agent-port - -Defines the Jaeger agent network port for tracing. - -**Default:** `6831` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | - ---- - -#### traces-exporter-jaeger-service-name - -Sets the Jaeger service name for tracing. 
- -**Default:** `iox-conductor` - -| influxdb3 serve option | Environment variable | -| :-------------------------------------- | :------------------------------------ | -| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | - ---- - -#### traces-exporter-jaeger-trace-context-header-name - -Specifies the header name used for passing trace context. - -**Default:** `uber-trace-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------------------- | :------------------------------------------------- | -| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | - ---- - -#### traces-jaeger-debug-name - -Specifies the header name used for force sampling in tracing. - -**Default:** `jaeger-debug-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------- | -| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | - ---- - -#### traces-jaeger-tags - -Defines a set of `key=value` pairs to annotate tracing spans with. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | - ---- - -#### traces-jaeger-max-msgs-per-second - -Specifies the maximum number of messages sent to a Jaeger service per second. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | - ---- - -### DataFusion - -- [datafusion-num-threads](#datafusion-num-threads) -- [datafusion-runtime-type](#datafusion-runtime-type) -- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) -- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) -- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) -- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) -- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) -- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) -- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) -- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) -- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) -- [datafusion-config](#datafusion-config) - -#### datafusion-num-threads - -Sets the maximum number of DataFusion runtime threads to use. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :--------------------------------- | -| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | - ---- - -#### datafusion-runtime-type - -Specifies the DataFusion tokio runtime type. - -This option supports the following values: - -- `current-thread` -- `multi-thread` _(default)_ -- `multi-thread-alt` - -**Default:** `multi-thread` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | - ---- - -#### datafusion-runtime-disable-lifo-slot - -Disables the LIFO slot of the DataFusion runtime. 
- -This option supports the following values: - -- `true` -- `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | - ---- - -#### datafusion-runtime-event-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -tokio runtime polls for external events--for example: timers, I/O. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :-------------------------------------------- | -| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | - ---- - -#### datafusion-runtime-global-queue-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -runtime polls the global task queue. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------- | :--------------------------------------------------- | -| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | - ---- - -#### datafusion-runtime-max-blocking-threads - -Specifies the limit for additional threads spawned by the DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------ | :-------------------------------------------------- | -| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | - ---- - -#### datafusion-runtime-max-io-events-per-tick - -Configures the maximum number of events processed per tick by the tokio -DataFusion runtime. 
- -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | - ---- - -#### datafusion-runtime-thread-keep-alive - -Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion -runtime. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | - ---- - -#### datafusion-runtime-thread-priority - -Sets the thread priority for tokio DataFusion runtime workers. - -**Default:** `10` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | - ---- - -#### datafusion-max-parquet-fanout - -When multiple parquet files are required in a sorted way -(deduplication for example), specifies the maximum fanout. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :---------------------------------------- | -| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | - ---- - -#### datafusion-use-cached-parquet-loader - -Uses a cached parquet loader when reading parquet files from the object store. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | - ---- - -#### datafusion-config - -Provides custom configuration to DataFusion as a comma-separated list of -`key:value` pairs. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | - ---- - -### HTTP - -- [max-http-request-size](#max-http-request-size) -- [http-bind](#http-bind) -- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) - -#### max-http-request-size - -Specifies the maximum size of HTTP requests. - -**Default:** `10485760` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :-------------------------------- | -| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | - ---- - -#### http-bind - -Defines the address on which InfluxDB serves HTTP API requests. - -**Default:** `0.0.0.0:8181` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | - ---- - -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
- -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - -### Memory - -- [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) -- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - -#### exec-mem-pool-bytes - -Specifies the size of the memory pool used during query execution, in bytes. - -**Default:** `8589934592` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | - ---- - -#### buffer-mem-limit-mb - -Specifies the size limit of the buffered data in MB. If this limit is exceeded, -the server forces a snapshot. - -**Default:** `5000` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` | - ---- - -#### force-snapshot-mem-threshold - -Specifies the threshold for the internal memory buffer. Supports either a -percentage (portion of available memory)of or absolute value -(total bytes)--for example: `70%` or `100000`. 
- -**Default:** `70%` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | - ---- - -### Write-Ahead Log (WAL) - -- [wal-flush-interval](#wal-flush-interval) -- [wal-snapshot-size](#wal-snapshot-size) -- [wal-max-write-buffer-size](#wal-max-write-buffer-size) -- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - -#### wal-flush-interval - -Specifies the interval to flush buffered data to a WAL file. Writes that wait -for WAL confirmation take up to this interval to complete. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------- | -| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | - ---- - -#### wal-snapshot-size - -Defines the number of WAL files to attempt to remove in a snapshot. This, -multiplied by the interval, determines how often snapshots are taken. - -**Default:** `600` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | - ---- - -#### wal-max-write-buffer-size - -Specifies the maximum number of write requests that can be buffered before a -flush must be executed and succeed. - -**Default:** `100000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | - ---- - -#### snapshotted-wal-files-to-keep - -Specifies the number of snapshotted WAL files to retain in the object store. -Flushing the WAL files does not clear the WAL files immediately; -they are deleted when the number of snapshotted WAL files exceeds this number. 
- -**Default:** `300` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :-------------------------------- | -| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | - ---- - -### Compaction - -#### gen1-duration - -Specifies the duration that Parquet files are arranged into. Data timestamps -land each row into a file of this duration. Supported durations are `1m`, -`5m`, and `10m`. These files are known as "generation 1" files, which the -compactor in InfluxDB 3 Enterprise can merge into larger generations. - -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------ | -| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | - ---- - -### Caching - -- [preemptive-cache-age](#preemptive-cache-age) -- [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) -- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) -- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) -- [disable-parquet-mem-cache](#disable-parquet-mem-cache) -- [last-cache-eviction-interval](#last-cache-eviction-interval) -- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - -#### preemptive-cache-age - -Specifies the interval to prefetch into the Parquet cache during compaction. - -**Default:** `3d` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------- | -| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | - ---- - -#### parquet-mem-cache-size-mb - -Defines the size of the in-memory Parquet cache in megabytes (MB). 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` | - ---- - -#### parquet-mem-cache-prune-percentage - -Specifies the percentage of entries to prune during a prune operation on the -in-memory Parquet cache. - -**Default:** `0.1` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | - ---- - -#### parquet-mem-cache-prune-interval - -Sets the interval to check if the in-memory Parquet cache needs to be pruned. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | - ---- - -#### disable-parquet-mem-cache - -Disables the in-memory Parquet cache. By default, the cache is enabled. - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | - ---- - -#### last-cache-eviction-interval - -Specifies the interval to evict expired entries from the Last-N-Value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | - ---- - -#### distinct-cache-eviction-interval - -Specifies the interval to evict expired entries from the distinct value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. 
- -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | - ---- - -### Processing engine - -- [plugin-dir](#plugin-dir) -- [virtual-env-location](#virtual-env-location) -- [package-manager](#package-manager) - -#### plugin-dir - -Specifies the local directory that contains Python plugins and their test files. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | - ---- - -#### virtual-env-location - -Specifies the location of the Python virtual environment that the processing -engine uses. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV_LOCATION` | - ---- - -#### package-manager - -Specifies the Python package manager that the processing engine uses. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--package-manager` | `PACKAGE_MANAGER` | + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md index 99fa0418e..05aa0b877 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. 
menu: influxdb3_enterprise: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md new file mode 100644 index 000000000..da936f12c --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,18 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. +influxdb3/enterprise/tags: [cli] +menu: + influxdb3_enterprise: + parent: influxdb3 delete +weight: 201 +related: + - /influxdb3/enterprise/admin/tokens/ + - /influxdb3/enterprise/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference +source: /shared/influxdb3-cli/delete/token.md +--- + + diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index f8e927d75..7fb25d5d9 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -38,6 +38,7 @@ influxdb3 serve [OPTIONS] \ | Option | | Description | | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-http-bind)_ | +| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-access-key-id)_ | | | `--aws-allow-http` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-default-region)_ | @@ -48,7 +49,11 @@ influxdb3 serve [OPTIONS] \ | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ | | {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | +| | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ | +| | `--compaction-cleanup-wait` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-cleanup-wait)_ | | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | | | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ | @@ -66,16 +71,22 @@ influxdb3 serve [OPTIONS] \ | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ | | | `--datafusion-runtime-type` | _See 
[configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ | +| | `--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-parquet-mem-cache)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-cache-eviction-interval)_ | +| | `--distinct-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-value-cache-disable-from-history)_ | | | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#force-snapshot-mem-threshold)_ | | | `--gen1-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-lookback-duration)_ | | | `--google-service-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#hard-delete-default-duration)_ | | `-h` | `--help` | Print help information | | | `--help-all` | Print detailed help information | | | `--http-bind` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#http-bind)_ | | | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-cache-eviction-interval)_ | +| | `--last-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-value-cache-disable-from-history)_ | | | `--license-email` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-email)_ | | | `--license-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-file)_ | | | `--log-destination` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-destination)_ | @@ -84,6 +95,11 @@ influxdb3 serve [OPTIONS] \ | | `--max-http-request-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#max-http-request-size)_ | | | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | +| | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | +| | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | +| | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ | +| | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ | +| | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ | | | `--object-store` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store)_ | | | `--object-store-cache-endpoint` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ | | | `--object-store-connection-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-connection-limit)_ | @@ -101,7 +117,16 @@ influxdb3 serve [OPTIONS] \ | | `--query-file-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-file-limit)_ | | | `--query-log-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-log-size)_ | | | `--replication-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#replication-interval)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#retention-check-interval)_ | | | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ | +| | `--tls-cert` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#tls-minimum-version)_ | | | `--traces-exporter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter)_ | | | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-host)_ | | | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-port)_ | @@ -110,11 +135,16 @@ influxdb3 serve [OPTIONS] \ | | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ | | | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | | | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ | +| | `--use-pacha-tree` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#use-pacha-tree)_ | | `-v` | `--verbose` | Enable verbose output | | | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ | +| | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ | | | `--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ | | | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#wal-replay-fail-on-error)_ | | | `--wal-snapshot-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* Required options" >}} diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index ee00c6dd2..50f81207b 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -128,6 +128,8 @@ influxdb3 serve - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) - [compaction-gen2-duration](#compaction-gen2-duration) - [compaction-multipliers](#compaction-multipliers) + - [compaction-cleanup-wait](#compaction-cleanup-wait) + - [compaction-check-interval](#compaction-check-interval) - [gen1-duration](#gen1-duration) - [Caching](#caching) - [preemptive-cache-age](#preemptive-cache-age) @@ -140,11 +142,38 @@ influxdb3 serve - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) + - [table-index-cache-max-entries](#table-index-cache-max-entries) + - [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) - [query-file-limit](#query-file-limit) - [Processing Engine](#processing-engine) - [plugin-dir](#plugin-dir) - [virtual-env-location](#virtual-env-location) - [package-manager](#package-manager) +- [Cluster Management](#cluster-management) + - [replication-interval](#replication-interval) + - [catalog-sync-interval](#catalog-sync-interval) + - [wait-for-running-ingestor](#wait-for-running-ingestor) +- [Resource Limits](#resource-limits) + - 
[num-cores](#num-cores) + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +- [Data Lifecycle Management](#data-lifecycle-management) + - [gen1-lookback-duration](#gen1-lookback-duration) + - [retention-check-interval](#retention-check-interval) + - [delete-grace-period](#delete-grace-period) + - [hard-delete-default-duration](#hard-delete-default-duration) +- [WAL Advanced Options](#wal-advanced-options) + - [wal-replay-fail-on-error](#wal-replay-fail-on-error) + - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) +- [Telemetry](#telemetry) + - [telemetry-disable-upload](#telemetry-disable-upload) + - [telemetry-endpoint](#telemetry-endpoint) +- [TCP Listeners](#tcp-listeners) + - [tcp-listener-file-path](#tcp-listener-file-path) + - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) +- [Experimental Features](#experimental-features) + - [use-pacha-tree](#use-pacha-tree) --- @@ -315,6 +344,10 @@ Default is `tls-1.2`. Disables authentication for all server actions (CLI commands and API requests). The server processes all requests without requiring tokens or authentication. +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| + --- #### disable-authz @@ -322,6 +355,10 @@ The server processes all requests without requiring tokens or authentication. Optionally disable authz by passing in a comma separated list of resources. Valid values are `health`, `ping`, and `metrics`. +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| + --- ### AWS @@ -1080,8 +1117,25 @@ to delete files marked as needing deletion during that compaction run. 
| :-------------------------- | :--------------------------------------------- | | `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | +{{% show-in "enterprise" %}} + --- +#### compaction-check-interval + +Specifies how often the compactor checks for new compaction work to perform. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :------------------------------------------------ | +| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | + +{{% /show-in %}} + +--- + + #### gen1-duration Specifies the duration that Parquet files are arranged into. Data timestamps @@ -1199,6 +1253,8 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. | :------------------------------- | :--------------------------------------- | | `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | +{{% show-in "enterprise" %}} + --- #### last-value-cache-disable-from-history @@ -1206,9 +1262,11 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. Disables populating the last-N-value cache from historical data. If disabled, the cache is still populated with data from the write-ahead log (WAL). -| influxdb3 serve option | Environment variable | -| :---------------------------------------- | :------------------------------------------------ | -| `--last-value-cache-disable-from-history` | `INFLUXDB3_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY` | +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :---------------------------------------------------------- | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} --- @@ -1223,6 +1281,7 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. 
| :----------------------------------- | :------------------------------------------- | | `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | +{{% show-in "enterprise" %}} --- #### distinct-value-cache-disable-from-history @@ -1230,9 +1289,36 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. Disables populating the distinct value cache from historical data. If disabled, the cache is still populated with data from the write-ahead log (WAL). -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY` | +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :-------------------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### table-index-cache-max-entries + +Specifies the maximum number of entries in the table index cache. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :-------------------------------------------- | +| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | + +--- + +#### table-index-cache-concurrency-limit + +Limits the concurrency level for table index cache operations. 
+ +**Default:** `8` + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------- | +| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | + --- #### query-file-limit @@ -1288,3 +1374,257 @@ This option supports the following values: | influxdb3 serve option | Environment variable | | :--------------------- | :------------------- | | `--package-manager` | `PACKAGE_MANAGER` | + +{{% show-in "enterprise" %}} + +--- + +### Cluster Management + + +- [replication-interval](#replication-interval) +- [catalog-sync-interval](#catalog-sync-interval) +- [wait-for-running-ingestor](#wait-for-running-ingestor) + +#### replication-interval + +Specifies the interval at which data replication occurs between cluster nodes. + +**Default:** `250ms` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :------------------------------------------- | +| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | + +--- + +#### catalog-sync-interval + +Defines how often the catalog synchronizes across cluster nodes. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------------ | +| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| + +--- + +#### wait-for-running-ingestor + +Specifies how long to wait for a running ingestor during startup. 
+ +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :------------------------------------------------ | +| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | + +{{% /show-in %}} + +{{% show-in "enterprise" %}} +--- + +### Resource Limits + +- [num-cores](#num-cores) +- [num-database-limit](#num-database-limit) +- [num-table-limit](#num-table-limit) +- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) + +#### num-cores + +Limits the number of CPU cores that InfluxDB Enterprise can use. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------------- | +| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | + +--- + +#### num-database-limit + +Sets the maximum number of databases that can be created. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Defines the maximum number of tables that can be created across all databases. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Sets the maximum number of columns allowed per table. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :---------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | + +{{% /show-in %}} + +--- + +### Data Lifecycle Management + +- [gen1-lookback-duration](#gen1-lookback-duration) +- [retention-check-interval](#retention-check-interval) +- [delete-grace-period](#delete-grace-period) +- [hard-delete-default-duration](#hard-delete-default-duration) + +#### gen1-lookback-duration + +Specifies how far back to look when creating generation 1 Parquet files. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | + +--- + +#### retention-check-interval + +Defines how often the system checks for data that should be deleted according to retention policies. + +**Default:** `1h` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------------------- | +| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | + +--- + +#### delete-grace-period + +Specifies the grace period before permanently deleting data. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :--------------------------------- | +| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | + +--- + +#### hard-delete-default-duration + +Sets the default duration for hard deletion of data. 
+ +**Default:** `90d` + +| influxdb3 serve option | Environment variable | +| :---------------------------------- | :-------------------------------------------- | +| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` | + +--- + +### WAL Advanced Options + +- [wal-replay-fail-on-error](#wal-replay-fail-on-error) +- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) + +#### wal-replay-fail-on-error + +Determines whether WAL replay should fail when encountering errors. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------- | +| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | + +--- + +#### wal-replay-concurrency-limit + +Sets the maximum number of concurrent WAL replay operations. + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :------------------------------------------ | +| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | + +--- + +### Telemetry + +- [telemetry-disable-upload](#telemetry-disable-upload) +- [telemetry-endpoint](#telemetry-endpoint) + +#### telemetry-disable-upload + +Disables the upload of telemetry data to InfluxData. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +--- + +#### telemetry-endpoint + +Specifies the endpoint for telemetry data uploads. 
+
+| influxdb3 serve option   | Environment variable               |
+| :----------------------- | :--------------------------------- |
+| `--telemetry-endpoint`   | `INFLUXDB3_TELEMETRY_ENDPOINT`     |
+
+---
+
+### TCP Listeners
+
+- [tcp-listener-file-path](#tcp-listener-file-path)
+- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path)
+
+#### tcp-listener-file-path
+
+Specifies the file path for the TCP listener configuration.
+
+| influxdb3 serve option      | Environment variable                 |
+| :-------------------------- | :----------------------------------- |
+| `--tcp-listener-file-path`  | `INFLUXDB3_TCP_LISTENER_FILE_PATH`   |
+
+---
+
+#### admin-token-recovery-tcp-listener-file-path
+
+Specifies the TCP listener file path for admin token recovery operations.
+
+| influxdb3 serve option                          | Environment variable                                    |
+| :---------------------------------------------- | :------------------------------------------------------ |
+| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` |
+
+{{% show-in "enterprise" %}}
+---
+
+### Experimental Features
+
+- [use-pacha-tree](#use-pacha-tree)
+
+#### use-pacha-tree
+
+Enables the experimental PachaTree storage engine for improved performance.
+
+> [!Warning]
+> This is an experimental feature and should not be used in production environments.
+
+**Default:** `false`
+
+| influxdb3 serve option  | Environment variable                   |
+| :---------------------- | :------------------------------------- |
+| `--use-pacha-tree`      | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE`  |
+{{% /show-in %}}
diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md
new file mode 100644
index 000000000..287a2521d
--- /dev/null
+++ b/content/shared/influxdb3-cli/config-options.md
@@ -0,0 +1,1690 @@
+
+{{< product-name >}} lets you customize your server configuration by using
+`influxdb3 serve` command options or by setting environment variables.
+ +## Configure your server + +Pass configuration options to the `influxdb serve` server using either command +options or environment variables. Command options take precedence over +environment variables. + +##### Example `influxdb3 serve` command options + + + +```sh +influxdb3 serve \ + --node-id node0 \ +{{% show-in "enterprise" %}} --cluster-id cluster0 \ + --license-email example@email.com \{{% /show-in %}} + --object-store file \ + --data-dir ~/.influxdb3 \ + --log-filter info +``` + +##### Example environment variables + + + +```sh +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com{{% /show-in %}} +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0{{% /show-in %}} +export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node +export INFLUXDB3_OBJECT_STORE=file +export INFLUXDB3_DB_DIR=~/.influxdb3 +export LOG_FILTER=info + +influxdb3 serve +``` + +## Server configuration options + +- [General](#general) +{{% show-in "enterprise" %}} - [cluster-id](#cluster-id){{% /show-in %}} + - [data-dir](#data-dir) +{{% show-in "enterprise" %}} - [license-email](#license-email) + - [license-file](#license-file) + - [mode](#mode){{% /show-in %}} + - [node-id](#node-id) +{{% show-in "enterprise" %}} - [node-id-from-env](#node-id-from-env){{% /show-in %}} + - [object-store](#object-store) + - [tls-key](#tls-key) + - [tls-cert](#tls-cert) + - [tls-minimum-versions](#tls-minimum-version) + - [without-auth](#without-auth) + - [disable-authz](#disable-authz) +- [AWS](#aws) + - [aws-access-key-id](#aws-access-key-id) + - [aws-secret-access-key](#aws-secret-access-key) + - [aws-default-region](#aws-default-region) + - [aws-endpoint](#aws-endpoint) + - [aws-session-token](#aws-session-token) + - [aws-allow-http](#aws-allow-http) + - [aws-skip-signature](#aws-skip-signature) +- [Google Cloud Service](#google-cloud-service) + - [google-service-account](#google-service-account) +- [Microsoft Azure](#microsoft-azure) + - 
[azure-storage-account](#azure-storage-account) + - [azure-storage-access-key](#azure-storage-access-key) +- [Object Storage](#object-storage) + - [bucket](#bucket) + - [object-store-connection-limit](#object-store-connection-limit) + - [object-store-http2-only](#object-store-http2-only) + - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) + - [object-store-max-retries](#object-store-max-retries) + - [object-store-retry-timeout](#object-store-retry-timeout) + - [object-store-cache-endpoint](#object-store-cache-endpoint) +- [Logs](#logs) + - [log-filter](#log-filter) + - [log-destination](#log-destination) + - [log-format](#log-format) + - [query-log-size](#query-log-size) +- [Traces](#traces) + - [traces-exporter](#traces-exporter) + - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) + - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) + - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) + - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) + - [traces-jaeger-debug-name](#traces-jaeger-debug-name) + - [traces-jaeger-tags](#traces-jaeger-tags) + - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) +- [DataFusion](#datafusion) + - [datafusion-num-threads](#datafusion-num-threads) + - [datafusion-runtime-type](#datafusion-runtime-type) + - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) + - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) + - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) + - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) + - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) + - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) + - 
[datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) + - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) + - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) + - [datafusion-config](#datafusion-config) +- [HTTP](#http) + - [max-http-request-size](#max-http-request-size) + - [http-bind](#http-bind) + - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) +- [Memory](#memory) + - [exec-mem-pool-bytes](#exec-mem-pool-bytes) + - [buffer-mem-limit-mb](#buffer-mem-limit-mb) + - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) +- [Write-Ahead Log (WAL)](#write-ahead-log-wal) + - [wal-flush-interval](#wal-flush-interval) + - [wal-snapshot-size](#wal-snapshot-size) + - [wal-max-write-buffer-size](#wal-max-write-buffer-size) + - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) + - [wal-replay-fail-on-error](#wal-replay-fail-on-error) + - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) +- [Compaction](#compaction) +{{% show-in "enterprise" %}} - [compaction-row-limit](#compaction-row-limit) + - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) + - [compaction-gen2-duration](#compaction-gen2-duration) + - [compaction-multipliers](#compaction-multipliers) + - [compaction-cleanup-wait](#compaction-cleanup-wait) + - [compaction-check-interval](#compaction-check-interval){{% /show-in %}} + - [gen1-duration](#gen1-duration) +- [Caching](#caching) + - [preemptive-cache-age](#preemptive-cache-age) + - [parquet-mem-cache-size](#parquet-mem-cache-size) + - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) + - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) + - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) + - [disable-parquet-mem-cache](#disable-parquet-mem-cache) + - [table-index-cache-max-entries](#table-index-cache-max-entries) + - 
[table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history){{% /show-in %}} + - [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history){{% /show-in %}} + - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + - [query-file-limit](#query-file-limit) +- [Processing Engine](#processing-engine) + - [plugin-dir](#plugin-dir) + - [virtual-env-location](#virtual-env-location) + - [package-manager](#package-manager) +{{% show-in "enterprise" %}} +- [Cluster Management](#cluster-management) + - [replication-interval](#replication-interval) + - [catalog-sync-interval](#catalog-sync-interval) + - [wait-for-running-ingestor](#wait-for-running-ingestor) +- [Resource Limits](#resource-limits) + - [num-cores](#num-cores) + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +{{% /show-in %}} +- [Data Lifecycle Management](#data-lifecycle-management) + - [gen1-lookback-duration](#gen1-lookback-duration) + - [retention-check-interval](#retention-check-interval) + - [delete-grace-period](#delete-grace-period) + - [hard-delete-default-duration](#hard-delete-default-duration) +- [Telemetry](#telemetry) + - [telemetry-disable-upload](#telemetry-disable-upload) + - [telemetry-endpoint](#telemetry-endpoint) +- [TCP Listeners](#tcp-listeners) + - [tcp-listener-file-path](#tcp-listener-file-path) + - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) +{{% show-in "enterprise" %}} +- [Experimental Features](#experimental-features) + - [use-pacha-tree](#use-pacha-tree) +{{% /show-in %}} + +--- + +### General + +{{% show-in "enterprise" %}} +- [cluster-id](#cluster-id) +{{% 
/show-in %}} +- [data-dir](#data-dir) +{{% show-in "enterprise" %}} +- [license-email](#license-email) +- [license-file](#license-file) +- [mode](#mode) +{{% /show-in %}} +- [node-id](#node-id) +{{% show-in "enterprise" %}} +- [node-id-from-env](#node-id-from-env) +{{% /show-in %}} +- [object-store](#object-store) +- [query-file-limit](#query-file-limit) + +{{% show-in "enterprise" %}} +#### cluster-id + +Specifies the cluster identifier that prefixes the object store path for the Enterprise Catalog. +This value must be different than the [`--node-id`](#node-id) value. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | + +--- +{{% /show-in %}} + +#### data-dir + +For the `file` object store, defines the location InfluxDB 3 uses to store files locally. +Required when using the `file` [object store](#object-store). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--data-dir` | `INFLUXDB3_DB_DIR` | + +--- + +{{% show-in "enterprise" %}} +#### license-email + +Specifies the email address to associate with your InfluxDB 3 Enterprise license +and automatically responds to the interactive email prompt when the server starts. +This option is mutually exclusive with [license-file](#license-file). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | + +--- + +#### license-file + +Specifies the path to a license file for InfluxDB 3 Enterprise. When provided, the license +file's contents are used instead of requesting a new license. +This option is mutually exclusive with [license-email](#license-email). 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | + +--- + +#### mode + +Sets the mode to start the server in. + +This option supports the following values: + +- `all` _(default)_: Enables all server modes +- `ingest`: Enables only data ingest capabilities +- `query`: Enables only query capabilities +- `compact`: Enables only compaction processes +- `process`: Enables only data processing capabilities + +You can specify multiple modes using a comma-delimited list (for example, `ingest,query`). + +**Default:** `all` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------- | +| `--mode` | `INFLUXDB3_ENTERPRISE_MODE` | + +--- +{{% /show-in %}} + +#### node-id + +Specifies the node identifier used as a prefix in all object store file paths. +This should be unique for any hosts sharing the same object store +configuration--for example, the same bucket. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | + +{{% show-in "enterprise" %}} +#### node-id-from-env + +Specifies the node identifier used as a prefix in all object store file paths. +Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. +This option cannot be used with the `--node-id` option. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | + +##### Example using --node-id-from-env + +```bash +export DATABASE_NODE=node0 && influxdb3 serve \ + --node-id-from-env DATABASE_NODE \ + --cluster-id cluster0 \ + --object-store file \ + --data-dir ~/.influxdb3/data +``` + +--- +{{% /show-in %}} + +#### object-store + +Specifies which object storage to use to store Parquet files. +This option supports the following values: + +- `memory`: Effectively no object persistence +- `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store +- `file`: Stores objects in the local filesystem (must also set `--data-dir`) +- `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) +- `google`: Google Cloud Storage (must also set `--bucket` and `--google-service-account`) +- `azure`: Microsoft Azure blob storage (must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`) + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--object-store` | `INFLUXDB3_OBJECT_STORE` | + +--- + +#### tls-key + +The path to a key file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-key` | `INFLUXDB3_TLS_KEY` | + +--- + +#### tls-cert + +The path to a cert file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-cert` | `INFLUXDB3_TLS_CERT` | + +--- + +#### tls-minimum-version + +The minimum version for TLS. +Valid values are `tls-1.2` or `tls-1.3`. +Default is `tls-1.2`. 
+ +| influxdb3 serve option | Environment variable | +| :---------------------- | :----------------------- | +| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | + +--- + +#### without-auth + +Disables authentication for all server actions (CLI commands and API requests). +The server processes all requests without requiring tokens or authentication. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| + +--- + +#### disable-authz + +Optionally disable authz by passing in a comma separated list of resources. +Valid values are `health`, `ping`, and `metrics`. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| + +--- + +### AWS + +- [aws-access-key-id](#aws-access-key-id) +- [aws-secret-access-key](#aws-secret-access-key) +- [aws-default-region](#aws-default-region) +- [aws-endpoint](#aws-endpoint) +- [aws-session-token](#aws-session-token) +- [aws-allow-http](#aws-allow-http) +- [aws-skip-signature](#aws-skip-signature) + +#### aws-access-key-id + +When using Amazon S3 as the object store, set this to an access key that has +permission to read from and write to the specified S3 bucket. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | + +--- + +#### aws-secret-access-key + +When using Amazon S3 as the object store, set this to the secret access key that +goes with the specified access key ID. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | + +--- + +#### aws-default-region + +When using Amazon S3 as the object store, set this to the region that goes with +the specified bucket if different from the fallback value. 
+ +**Default:** `us-east-1` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-default-region` | `AWS_DEFAULT_REGION` | + +--- + +#### aws-endpoint + +When using an Amazon S3 compatibility storage service, set this to the endpoint. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-endpoint` | `AWS_ENDPOINT` | + +--- + +#### aws-session-token + +When using Amazon S3 as an object store, set this to the session token. This is +handy when using a federated login or SSO and fetching credentials via the UI. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-session-token` | `AWS_SESSION_TOKEN` | + +--- + +#### aws-allow-http + +Allows unencrypted HTTP connections to AWS. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-allow-http` | `AWS_ALLOW_HTTP` | + +--- + +#### aws-skip-signature + +If enabled, S3 object stores do not fetch credentials and do not sign requests. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | + +--- + +### Google Cloud Service + +- [google-service-account](#google-service-account) + +#### google-service-account + +When using Google Cloud Storage as the object store, set this to the path to the +JSON file that contains the Google credentials. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------- | :----------------------- | +| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | + +--- + +### Microsoft Azure + +- [azure-storage-account](#azure-storage-account) +- [azure-storage-access-key](#azure-storage-access-key) + +#### azure-storage-account + +When using Microsoft Azure as the object store, set this to the name you see +when navigating to **All Services > Storage accounts > `[name]`**. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | + +--- + +#### azure-storage-access-key + +When using Microsoft Azure as the object store, set this to one of the Key +values in the Storage account's **Settings > Access keys**. + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------- | +| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | + +--- + +### Object Storage + +- [bucket](#bucket) +- [object-store-connection-limit](#object-store-connection-limit) +- [object-store-http2-only](#object-store-http2-only) +- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) +- [object-store-max-retries](#object-store-max-retries) +- [object-store-retry-timeout](#object-store-retry-timeout) +- [object-store-cache-endpoint](#object-store-cache-endpoint) + +#### bucket + +Sets the name of the object storage bucket to use. Must also set +`--object-store` to a cloud object storage for this option to take effect. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--bucket` | `INFLUXDB3_BUCKET` | + +--- + +#### object-store-connection-limit + +When using a network-based object store, limits the number of connections to +this value. 
+
+**Default:** `16`
+
+| influxdb3 serve option            | Environment variable            |
+| :-------------------------------- | :------------------------------ |
+| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` |
+
+---
+
+#### object-store-http2-only
+
+Forces HTTP/2 connections to network-based object stores.
+
+| influxdb3 serve option      | Environment variable      |
+| :-------------------------- | :------------------------ |
+| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` |
+
+---
+
+#### object-store-http2-max-frame-size
+
+Sets the maximum frame size (in bytes/octets) for HTTP/2 connections.
+
+| influxdb3 serve option                | Environment variable                |
+| :------------------------------------ | :---------------------------------- |
+| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` |
+
+---
+
+#### object-store-max-retries
+
+Defines the maximum number of times to retry a request.
+
+| influxdb3 serve option       | Environment variable       |
+| :--------------------------- | :------------------------- |
+| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` |
+
+---
+
+#### object-store-retry-timeout
+
+Specifies the maximum length of time from the initial request after which no
+further retries are attempted.
+
+| influxdb3 serve option         | Environment variable         |
+| :----------------------------- | :--------------------------- |
+| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` |
+
+---
+
+#### object-store-cache-endpoint
+
+Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache.
+
+| influxdb3 serve option          | Environment variable          |
+| :------------------------------ | :---------------------------- |
+| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` |
+
+---
+
+### Logs
+
+- [log-filter](#log-filter)
+- [log-destination](#log-destination)
+- [log-format](#log-format)
+- [query-log-size](#query-log-size)
+
+#### log-filter
+
+Sets the filter directive for logs.
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-filter` | `LOG_FILTER` | + +--- + +#### log-destination + +Specifies the destination for logs. + +**Default:** `stdout` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-destination` | `LOG_DESTINATION` | + +--- + +#### log-format + +Defines the message format for logs. + +This option supports the following values: + +- `full` _(default)_ + +**Default:** `full` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-format` | `LOG_FORMAT` | + +--- + +#### query-log-size + +Defines the size of the query log. Up to this many queries remain in the +log before older queries are evicted to make room for new ones. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | + +--- + +### Traces + +- [traces-exporter](#traces-exporter) +- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) +- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) +- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) +- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) +- [traces-jaeger-debug-name](#traces-jaeger-debug-name) +- [traces-jaeger-tags](#traces-jaeger-tags) +- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) + +#### traces-exporter + +Sets the type of tracing exporter. + +**Default:** `none` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--traces-exporter` | `TRACES_EXPORTER` | + +--- + +#### traces-exporter-jaeger-agent-host + +Specifies the Jaeger agent network hostname for tracing. 
+ +**Default:** `0.0.0.0` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | + +--- + +#### traces-exporter-jaeger-agent-port + +Defines the Jaeger agent network port for tracing. + +**Default:** `6831` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | + +--- + +#### traces-exporter-jaeger-service-name + +Sets the Jaeger service name for tracing. + +**Default:** `iox-conductor` + +| influxdb3 serve option | Environment variable | +| :-------------------------------------- | :------------------------------------ | +| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | + +--- + +#### traces-exporter-jaeger-trace-context-header-name + +Specifies the header name used for passing trace context. + +**Default:** `uber-trace-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------------------- | :------------------------------------------------- | +| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | + +--- + +#### traces-jaeger-debug-name + +Specifies the header name used for force sampling in tracing. + +**Default:** `jaeger-debug-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------- | +| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | + +--- + +#### traces-jaeger-tags + +Defines a set of `key=value` pairs to annotate tracing spans with. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | + +--- + +#### traces-jaeger-max-msgs-per-second + +Specifies the maximum number of messages sent to a Jaeger service per second. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | + +--- + +### DataFusion + +- [datafusion-num-threads](#datafusion-num-threads) +- [datafusion-runtime-type](#datafusion-runtime-type) +- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) +- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) +- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) +- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) +- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) +- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) +- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) +- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) +- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) +- [datafusion-config](#datafusion-config) + +#### datafusion-num-threads + +Sets the maximum number of DataFusion runtime threads to use. + +| influxdb3 serve option | Environment variable | +| :------------------------- | :--------------------------------- | +| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | + +--- + +#### datafusion-runtime-type + +Specifies the DataFusion tokio runtime type. 
+ +This option supports the following values: + +- `current-thread` +- `multi-thread` _(default)_ +- `multi-thread-alt` + +**Default:** `multi-thread` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :---------------------------------- | +| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | + +--- + +#### datafusion-runtime-disable-lifo-slot + +Disables the LIFO slot of the DataFusion runtime. + +This option supports the following values: + +- `true` +- `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | + +--- + +#### datafusion-runtime-event-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +tokio runtime polls for external events--for example: timers, I/O. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :-------------------------------------------- | +| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | + +--- + +#### datafusion-runtime-global-queue-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +runtime polls the global task queue. + +| influxdb3 serve option | Environment variable | +| :------------------------------------------- | :--------------------------------------------------- | +| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | + +--- + +#### datafusion-runtime-max-blocking-threads + +Specifies the limit for additional threads spawned by the DataFusion runtime. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------------------------ | :-------------------------------------------------- | +| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | + +--- + +#### datafusion-runtime-max-io-events-per-tick + +Configures the maximum number of events processed per tick by the tokio +DataFusion runtime. + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :---------------------------------------------------- | +| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | + +--- + +#### datafusion-runtime-thread-keep-alive + +Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion +runtime. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | + +--- + +#### datafusion-runtime-thread-priority + +Sets the thread priority for tokio DataFusion runtime workers. + +**Default:** `10` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | + +--- + +#### datafusion-max-parquet-fanout + +When multiple parquet files are required in a sorted way +(deduplication for example), specifies the maximum fanout. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :---------------------------------------- | +| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | + +--- + +#### datafusion-use-cached-parquet-loader + +Uses a cached parquet loader when reading parquet files from the object store. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | + +--- + +#### datafusion-config + +Provides custom configuration to DataFusion as a comma-separated list of +`key:value` pairs. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | + +--- + +### HTTP + +- [max-http-request-size](#max-http-request-size) +- [http-bind](#http-bind) +- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) + +#### max-http-request-size + +Specifies the maximum size of HTTP requests. + +**Default:** `10485760` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :-------------------------------- | +| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | + +--- + +#### http-bind + +Defines the address on which InfluxDB serves HTTP API requests. + +**Default:** `0.0.0.0:8181` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | + +--- + +#### admin-token-recovery-http-bind + +Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. + +> [!Warning] +> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
+

**Default:** `127.0.0.1:8182` (when enabled)

| influxdb3 serve option | Environment variable |
| :--------------------- | :------------------- |
| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` |

##### Example usage

```bash
# Start server with recovery endpoint
influxdb3 serve --admin-token-recovery-http-bind

# In another terminal, regenerate the admin token
influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182
```

---

### Memory

- [exec-mem-pool-bytes](#exec-mem-pool-bytes)
- [buffer-mem-limit-mb](#buffer-mem-limit-mb)
- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold)

#### exec-mem-pool-bytes

Specifies the size of memory pool used during query execution.
Can be given as absolute value in bytes or as a percentage of the total available memory--for
example: `8000000000` or `10%`.

{{% show-in "core" %}}**Default:** `8589934592`{{% /show-in %}}
{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}}

| influxdb3 serve option  | Environment variable            |
| :---------------------- | :------------------------------ |
| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` |

{{% show-in "core" %}}
---

#### buffer-mem-limit-mb


Specifies the size limit of the buffered data in MB. If this limit is exceeded,
the server forces a snapshot.

**Default:** `5000`

| influxdb3 serve option  | Environment variable            |
| :---------------------- | :------------------------------ |
| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` |

{{% /show-in %}}

---

#### force-snapshot-mem-threshold

Specifies the threshold for the internal memory buffer. Supports either a
percentage (portion of available memory) or absolute value in MB--for example: `70%` or `1000`. 
+ +{{% show-in "core" %}}**Default:** `70%`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `50%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | + +--- + +### Write-Ahead Log (WAL) + +- [wal-flush-interval](#wal-flush-interval) +- [wal-snapshot-size](#wal-snapshot-size) +- [wal-max-write-buffer-size](#wal-max-write-buffer-size) +- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) +- [wal-replay-fail-on-error](#wal-replay-fail-on-error) +- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) + +#### wal-flush-interval + +Specifies the interval to flush buffered data to a WAL file. Writes that wait +for WAL confirmation take up to this interval to complete. + +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------- | +| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | + +--- + +#### wal-snapshot-size + +Defines the number of WAL files to attempt to remove in a snapshot. This, +multiplied by the interval, determines how often snapshots are taken. + +**Default:** `600` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | + +--- + +#### wal-max-write-buffer-size + +Specifies the maximum number of write requests that can be buffered before a +flush must be executed and succeed. + +**Default:** `100000` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | + +--- + +#### snapshotted-wal-files-to-keep + +Specifies the number of snapshotted WAL files to retain in the object store. 
+Flushing the WAL files does not clear the WAL files immediately; +they are deleted when the number of snapshotted WAL files exceeds this number. + +**Default:** `300` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :-------------------------------- | +| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | + +--- + +#### wal-replay-fail-on-error + +Determines whether WAL replay should fail when encountering errors. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------- | +| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | + +--- + +#### wal-replay-concurrency-limit + +Sets the maximum number of concurrent WAL replay operations. + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :------------------------------------------ | +| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | + +--- + +### Compaction + +{{% show-in "enterprise" %}} +- [compaction-row-limit](#compaction-row-limit) +- [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) +- [compaction-gen2-duration](#compaction-gen2-duration) +- [compaction-multipliers](#compaction-multipliers) +- [compaction-cleanup-wait](#compaction-cleanup-wait) +- [compaction-check-interval](#compaction-check-interval) +{{% /show-in %}} +- [gen1-duration](#gen1-duration) + +{{% show-in "enterprise" %}} +#### compaction-row-limit + +Specifies the soft limit for the number of rows per file that the compactor +writes. The compactor may write more rows than this limit. 
+ +**Default:** `1000000` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------------------ | +| `--compaction-row-limit` | `INFLUXDB3_ENTERPRISE_COMPACTION_ROW_LIMIT` | + +--- + +#### compaction-max-num-files-per-plan + +Sets the maximum number of files included in any compaction plan. + +**Default:** `500` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :------------------------------------------------------- | +| `--compaction-max-num-files-per-plan` | `INFLUXDB3_ENTERPRISE_COMPACTION_MAX_NUM_FILES_PER_PLAN` | + +--- + +#### compaction-gen2-duration + +Specifies the duration of the first level of compaction (gen2). Later levels of +compaction are multiples of this duration. This value should be equal to or +greater than the gen1 duration. + +**Default:** `20m` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------------------- | +| `--compaction-gen2-duration` | `INFLUXDB3_ENTERPRISE_COMPACTION_GEN2_DURATION` | + +--- + +#### compaction-multipliers + +Specifies a comma-separated list of multiples defining the duration of each +level of compaction. The number of elements in the list determines the number of +compaction levels. The first element specifies the duration of the first level +(gen3); subsequent levels are multiples of the previous level. + +**Default:** `3,4,6,5` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :-------------------------------------------- | +| `--compaction-multipliers` | `INFLUXDB3_ENTERPRISE_COMPACTION_MULTIPLIERS` | + +--- + +#### compaction-cleanup-wait + +Specifies the amount of time that the compactor waits after finishing a compaction run +to delete files marked as needing deletion during that compaction run. 
+ +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :--------------------------------------------- | +| `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | + +--- + +#### compaction-check-interval + +Specifies how often the compactor checks for new compaction work to perform. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :------------------------------------------------ | +| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | + +--- +{{% /show-in %}} + +#### gen1-duration + +Specifies the duration that Parquet files are arranged into. Data timestamps +land each row into a file of this duration. Supported durations are `1m`, +`5m`, and `10m`. These files are known as "generation 1" files{{% show-in "enterprise" %}}, which the +compactor can merge into larger generations{{% /show-in %}}{{% show-in "core" %}} that the +compactor in InfluxDB 3 Enterprise can merge into larger generations{{% /show-in %}}. 
+ +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------ | +| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | + +--- + +### Caching + +- [preemptive-cache-age](#preemptive-cache-age) +- [parquet-mem-cache-size](#parquet-mem-cache-size) +- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) +- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) +- [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) +- [disable-parquet-mem-cache](#disable-parquet-mem-cache) +- [table-index-cache-max-entries](#table-index-cache-max-entries) +- [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} +- [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) +{{% /show-in %}} +- [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} +- [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) +{{% /show-in %}} +- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + +#### preemptive-cache-age + +Specifies the interval to prefetch into the Parquet cache during compaction. + +**Default:** `3d` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------- | +| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | + +--- + +#### parquet-mem-cache-size + +Specifies the size of the in-memory Parquet cache{{% show-in "core" %}} in megabytes (MB){{% /show-in %}}{{% show-in "enterprise" %}} in megabytes or percentage of total available memory{{% /show-in %}}. 
+ +{{% show-in "core" %}}**Default:** `1000`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :---------------------------------- | +{{% show-in "core" %}}| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` |{{% /show-in %}} +{{% show-in "enterprise" %}}| `--parquet-mem-cache-size` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE` |{{% /show-in %}} + +#### parquet-mem-cache-prune-percentage + +Specifies the percentage of entries to prune during a prune operation on the +in-memory Parquet cache. + +**Default:** `0.1` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | + +--- + +#### parquet-mem-cache-prune-interval + +Sets the interval to check if the in-memory Parquet cache needs to be pruned. + +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | + +--- + +#### parquet-mem-cache-query-path-duration + +{{% show-in "enterprise" %}} +A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies +{{% /show-in %}}{{% show-in "core" %}} +Specifies +{{% /show-in %}} +the time window for caching recent Parquet files in memory. Default is `5h`. 
+ +Only files containing data with a timestamp between `now` and `now - duration` +are cached when accessed during queries--for example, with the default `5h` setting: + +- Current time: `2024-06-10 15:00:00` +- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) + +If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): + +- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) +- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | + +--- + +#### disable-parquet-mem-cache + +Disables the in-memory Parquet cache. By default, the cache is enabled. + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | + +--- + +#### table-index-cache-max-entries + +Specifies the maximum number of entries in the table index cache. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :-------------------------------------------- | +| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | + +--- + +#### table-index-cache-concurrency-limit + +Limits the concurrency level for table index cache operations. + +**Default:** `8` + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------- | +| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | + +{{% show-in "enterprise" %}} + +--- + +#### last-value-cache-disable-from-history + +Disables populating the last-N-value cache from historical data. 
+If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :---------------------------------------------------------- | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### last-cache-eviction-interval + +Specifies the interval to evict expired entries from the Last-N-Value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | + + +{{% show-in "enterprise" %}} +--- + +#### distinct-value-cache-disable-from-history + +Disables populating the distinct value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :-------------------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### distinct-cache-eviction-interval + +Specifies the interval to evict expired entries from the distinct value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | + +--- + +#### query-file-limit + +Limits the number of Parquet files a query can access. +If a query attempts to read more than this limit, {{< product-name >}} returns an error. 
+

{{% show-in "core" %}}
**Default:** `432`

With the default `432` setting and the default [`gen1-duration`](#gen1-duration)
setting of 10 minutes, queries can access up to 72 hours of data, but
potentially less depending on whether all data for a given 10 minute block of
time was ingested during the same period.

You can increase this limit to allow more files to be queried, but be aware of
the following side-effects:

- Degraded query performance for queries that read more Parquet files
- Increased memory usage
- Your system potentially killing the `influxdb3` process due to Out-of-Memory
  (OOM) errors
- If using object storage to store data, many GET requests to access the data
  (as many as 2 per file)

> [!Note]
> We recommend keeping the default setting and querying smaller time ranges.
> If you need to query longer time ranges or faster query performance on any query
> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/)
> optimizes data storage by compacting and rearranging Parquet files to achieve
> faster query performance.
{{% /show-in %}}

| influxdb3 serve option | Environment variable         |
| :--------------------- | :--------------------------- |
| `--query-file-limit`   | `INFLUXDB3_QUERY_FILE_LIMIT` |

---

### Processing Engine

- [plugin-dir](#plugin-dir)
- [virtual-env-location](#virtual-env-location)
- [package-manager](#package-manager)

#### plugin-dir

Specifies the local directory that contains Python plugins and their test files.

| influxdb3 serve option | Environment variable   |
| :--------------------- | :--------------------- |
| `--plugin-dir`         | `INFLUXDB3_PLUGIN_DIR` |

---

#### virtual-env-location

Specifies the location of the Python virtual environment that the processing
engine uses. 
+ +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------- | +| `--virtual-env-location` | `VIRTUAL_ENV` | + +--- + +#### package-manager + +Specifies the Python package manager that the processing engine uses. + +This option supports the following values: + +- `discover` _(default)_: Automatically discover available package manager +- `pip`: Use pip package manager +- `uv`: Use uv package manager + +**Default:** `discover` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--package-manager` | `PACKAGE_MANAGER` | + +{{% show-in "enterprise" %}} + +--- + +### Cluster Management + +- [replication-interval](#replication-interval) +- [catalog-sync-interval](#catalog-sync-interval) +- [wait-for-running-ingestor](#wait-for-running-ingestor) + +#### replication-interval + +Specifies the interval at which data replication occurs between cluster nodes. + +**Default:** `250ms` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :------------------------------------------- | +| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | + +--- + +#### catalog-sync-interval + +Defines how often the catalog synchronizes across cluster nodes. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------------ | +| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| + +--- + +#### wait-for-running-ingestor + +Specifies how long to wait for a running ingestor during startup. 
+ +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :------------------------------------------------ | +| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | + +--- + +### Resource Limits + +- [num-cores](#num-cores) +- [num-database-limit](#num-database-limit) +- [num-table-limit](#num-table-limit) +- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) + +#### num-cores + +Limits the number of CPU cores that InfluxDB Enterprise can use. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------------- | +| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | + +--- + +#### num-database-limit + +Sets the maximum number of databases that can be created. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Defines the maximum number of tables that can be created across all databases. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Sets the maximum number of columns allowed per table. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :---------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | + +{{% /show-in %}} + +--- + +### Data Lifecycle Management + +- [gen1-lookback-duration](#gen1-lookback-duration) +- [retention-check-interval](#retention-check-interval) +- [delete-grace-period](#delete-grace-period) +- [hard-delete-default-duration](#hard-delete-default-duration) + +#### gen1-lookback-duration + +Specifies how far back to look when creating generation 1 Parquet files. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | + +--- + +#### retention-check-interval + +Defines how often the system checks for data that should be deleted according to retention policies. + +**Default:** `1h` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------------------- | +| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | + +--- + +#### delete-grace-period + +Specifies the grace period before permanently deleting data. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :--------------------------------- | +| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | + +--- + +#### hard-delete-default-duration + +Sets the default duration for hard deletion of data. 
+
+**Default:** `90d`
+
+| influxdb3 serve option | Environment variable |
+| :---------------------------------- | :-------------------------------------------- |
+| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` |
+
+---
+
+### Telemetry
+
+- [telemetry-disable-upload](#telemetry-disable-upload)
+- [telemetry-endpoint](#telemetry-endpoint)
+
+#### telemetry-disable-upload
+
+Disables the upload of telemetry data to InfluxData.
+
+**Default:** `false`
+
+| influxdb3 serve option | Environment variable |
+| :---------------------------- | :-------------------------------------- |
+| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` |
+
+---
+
+#### telemetry-endpoint
+
+Specifies the endpoint for telemetry data uploads.
+
+| influxdb3 serve option | Environment variable |
+| :----------------------- | :--------------------------------- |
+| `--telemetry-endpoint` | `INFLUXDB3_TELEMETRY_ENDPOINT` |
+
+---
+
+### TCP Listeners
+
+- [tcp-listener-file-path](#tcp-listener-file-path)
+- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path)
+
+#### tcp-listener-file-path
+
+Specifies the file path for the TCP listener configuration.
+
+| influxdb3 serve option | Environment variable |
+| :-------------------------- | :----------------------------------- |
+| `--tcp-listener-file-path` | `INFLUXDB3_TCP_LISTENER_FILE_PATH` |
+
+---
+
+#### admin-token-recovery-tcp-listener-file-path
+
+Specifies the TCP listener file path for admin token recovery operations.
+ +| influxdb3 serve option | Environment variable | +| :---------------------------------------------- | :-------------------------------------------------------- | +| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | + +{{% show-in "enterprise" %}} +--- + +### Experimental Features + +- [use-pacha-tree](#use-pacha-tree) + +#### use-pacha-tree + +Enables the experimental PachaTree storage engine for improved performance. + +> [!Warning] +> This is an experimental feature and should not be used in production environments. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | + +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-cli/delete/_index.md b/content/shared/influxdb3-cli/delete/_index.md index 81a47ffc6..563618314 100644 --- a/content/shared/influxdb3-cli/delete/_index.md +++ b/content/shared/influxdb3-cli/delete/_index.md @@ -1,5 +1,5 @@ -The `influxdb3 delete` command deletes a resource such as a database or a table. +The `influxdb3 delete` command deletes a resource such as a cache, a database, or a table. 
## Usage @@ -19,6 +19,7 @@ influxdb3 delete | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server | | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | | help | Print command help or the help of a subcommand | {{% /show-in %}} @@ -30,6 +31,7 @@ influxdb3 delete | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server | | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | | help | Print command help or the help of a subcommand | {{% /show-in %}} diff --git a/content/shared/influxdb3-cli/delete/token.md b/content/shared/influxdb3-cli/delete/token.md new file mode 100644 index 000000000..73cfd688a --- /dev/null +++ b/content/shared/influxdb3-cli/delete/token.md @@ -0,0 +1,32 @@ + +The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. 
+
+## Usage
+
+```bash
+influxdb3 delete token [OPTIONS]
+```
+
+## Options
+
+| Option | Description | Default | Environment |
+|----------------|-----------------------------------------------------------------------------------|---------|------------------------|
+| `--token` | _({{< req >}})_ The token for authentication with the {{% product-name %}} server | | `INFLUXDB3_AUTH_TOKEN` |
+| `--token-name` | _({{< req >}})_ The name of the token to be deleted | | |
+| `--tls-ca` | An optional argument specifying a custom CA, useful for testing with self-signed certificates | | `INFLUXDB3_TLS_CA` |
+| `-h`, `--help` | Print help information | | |
+| `--help-all` | Print detailed help information | | |
+
+## Examples
+
+### Delete a token by name
+
+```bash
+influxdb3 delete token --token-name TOKEN_TO_DELETE --token AUTH_TOKEN
+```
+
+### Show help for the command
+
+```bash
+influxdb3 delete token --help
+```
\ No newline at end of file
From df1069ba65cbd54c98d3dc7a26cabed0cf82567a Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Tue, 5 Aug 2025 19:34:08 -0500
Subject: [PATCH 05/13] chore(3ent): Add --num-core config option (still
 missing from the CLI --help)

---
 .../enterprise/reference/config-options.md    | 1622 +----------------
 .../shared/influxdb3-cli/config-options.md    |   14 +-
 2 files changed, 17 insertions(+), 1619 deletions(-)

diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md
index 50f81207b..cab8a5a77 100644
--- a/content/influxdb3/enterprise/reference/config-options.md
+++ b/content/influxdb3/enterprise/reference/config-options.md
@@ -8,1623 +8,9 @@ menu:
     parent: Reference
     name: Configuration options
 weight: 100
+source: /shared/influxdb3-cli/config-options.md
---
- -## Configure your server - -Pass configuration options to the `influxdb serve` server using either command -options or environment variables. Command options take precedence over -environment variables. - -##### Example `influxdb3 serve` command options - - - -```sh -influxdb3 serve \ - --node-id node0 \ - --cluster-id cluster0 \ - --license-email example@email.com \ - --object-store file \ - --data-dir ~/.influxdb3 \ - --log-filter info -``` - -##### Example environment variables - - - -```sh -export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com -export INFLUXDB3_OBJECT_STORE=file -export INFLUXDB3_DB_DIR=~/.influxdb3 -export LOG_FILTER=info - -influxdb3 serve -``` - -## Server configuration options - -- [General](#general) - - [cluster-id](#cluster-id) - - [data-dir](#data-dir) - - [license-email](#license-email) - - [license-file](#license-file) - - [mode](#mode) - - [node-id](#node-id) - - [node-id-from-env](#node-id-from-env) - - [object-store](#object-store) - - [tls-key](#tls-key) - - [tls-cert](#tls-cert) - - [tls-minimum-versions](#tls-minimum-version) - - [without-auth](#without-auth) - - [disable-authz](#disable-authz) -- [AWS](#aws) - - [aws-access-key-id](#aws-access-key-id) - - [aws-secret-access-key](#aws-secret-access-key) - - [aws-default-region](#aws-default-region) - - [aws-endpoint](#aws-endpoint) - - [aws-session-token](#aws-session-token) - - [aws-allow-http](#aws-allow-http) - - [aws-skip-signature](#aws-skip-signature) -- [Google Cloud Service](#google-cloud-service) - - [google-service-account](#google-service-account) -- [Microsoft Azure](#microsoft-azure) - - [azure-storage-account](#azure-storage-account) - - [azure-storage-access-key](#azure-storage-access-key) -- [Object Storage](#object-storage) - - [bucket](#bucket) - - [object-store-connection-limit](#object-store-connection-limit) - - [object-store-http2-only](#object-store-http2-only) - - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) - - 
[object-store-max-retries](#object-store-max-retries) - - [object-store-retry-timeout](#object-store-retry-timeout) - - [object-store-cache-endpoint](#object-store-cache-endpoint) -- [Logs](#logs) - - [log-filter](#log-filter) - - [log-destination](#log-destination) - - [log-format](#log-format) - - [query-log-size](#query-log-size) -- [Traces](#traces) - - [traces-exporter](#traces-exporter) - - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) - - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) - - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) - - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) - - [traces-jaeger-debug-name](#traces-jaeger-debug-name) - - [traces-jaeger-tags](#traces-jaeger-tags) - - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) -- [DataFusion](#datafusion) - - [datafusion-num-threads](#datafusion-num-threads) - - [datafusion-runtime-type](#datafusion-runtime-type) - - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) - - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) - - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) - - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) - - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) - - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) - - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) - - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) - - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) - - [datafusion-config](#datafusion-config) -- [HTTP](#http) - - [max-http-request-size](#max-http-request-size) - - [http-bind](#http-bind) - - 
[admin-token-recovery-http-bind](#admin-token-recovery-http-bind) -- [Memory](#memory) - - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) -- [Write-Ahead Log (WAL)](#write-ahead-log-wal) - - [wal-flush-interval](#wal-flush-interval) - - [wal-snapshot-size](#wal-snapshot-size) - - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Compaction](#compaction) - - [compaction-row-limit](#compaction-row-limit) - - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) - - [compaction-gen2-duration](#compaction-gen2-duration) - - [compaction-multipliers](#compaction-multipliers) - - [compaction-cleanup-wait](#compaction-cleanup-wait) - - [compaction-check-interval](#compaction-check-interval) - - [gen1-duration](#gen1-duration) -- [Caching](#caching) - - [preemptive-cache-age](#preemptive-cache-age) - - [parquet-mem-cache-size](#parquet-mem-cache-size) - - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - - [last-cache-eviction-interval](#last-cache-eviction-interval) - - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) - - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) - - [table-index-cache-max-entries](#table-index-cache-max-entries) - - [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) - - [query-file-limit](#query-file-limit) -- [Processing Engine](#processing-engine) - - [plugin-dir](#plugin-dir) - - [virtual-env-location](#virtual-env-location) - - 
[package-manager](#package-manager) -- [Cluster Management](#cluster-management) - - [replication-interval](#replication-interval) - - [catalog-sync-interval](#catalog-sync-interval) - - [wait-for-running-ingestor](#wait-for-running-ingestor) -- [Resource Limits](#resource-limits) - - [num-cores](#num-cores) - - [num-database-limit](#num-database-limit) - - [num-table-limit](#num-table-limit) - - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) -- [Data Lifecycle Management](#data-lifecycle-management) - - [gen1-lookback-duration](#gen1-lookback-duration) - - [retention-check-interval](#retention-check-interval) - - [delete-grace-period](#delete-grace-period) - - [hard-delete-default-duration](#hard-delete-default-duration) -- [WAL Advanced Options](#wal-advanced-options) - - [wal-replay-fail-on-error](#wal-replay-fail-on-error) - - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) -- [Telemetry](#telemetry) - - [telemetry-disable-upload](#telemetry-disable-upload) - - [telemetry-endpoint](#telemetry-endpoint) -- [TCP Listeners](#tcp-listeners) - - [tcp-listener-file-path](#tcp-listener-file-path) - - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) -- [Experimental Features](#experimental-features) - - [use-pacha-tree](#use-pacha-tree) - ---- - -### General - -- [cluster-id](#cluster-id) -- [data-dir](#data-dir) -- [license-email](#license-email) -- [license-file](#license-file) -- [mode](#mode) -- [node-id](#node-id) -- [object-store](#object-store) -- [query-file-limit](#query-file-limit) - -#### cluster-id - -Specifies the cluster identifier that prefixes the object store path for the Enterprise Catalog. -This value must be different than the [`--node-id`](#node-id) value. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | - ---- - -#### data-dir - -For the `file` object store, defines the location {{< product-name >}} uses to store files locally. -Required when using the `file` [object store](#object-store). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--data-dir` | `INFLUXDB3_DB_DIR` | - ---- - -#### license-email - -Specifies the email address to associate with your {{< product-name >}} license -and automatically responds to the interactive email prompt when the server starts. -This option is mutually exclusive with [license-file](#license-file). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | - ---- - -#### license-file - -Specifies the path to a license file for {{< product-name >}}. When provided, the license -file's contents are used instead of requesting a new license. -This option is mutually exclusive with [license-email](#license-email). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | - ---- - -#### mode - -Sets the mode to start the server in. - -This option supports the following values: - -- `all` _(default)_: Enables all server modes -- `ingest`: Enables only data ingest capabilities -- `query`: Enables only query capabilities -- `compact`: Enables only compaction processes -- `process`: Enables only data processing capabilities - -You can specify multiple modes using a comma-delimited list (for example, `ingest,query`). 
- -**Default:** `all` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :-------------------------- | -| `--mode` | `INFLUXDB3_ENTERPRISE_MODE` | - ---- - -#### node-id - -Specifies the node identifier used as a prefix in all object store file paths. -This should be unique for any hosts sharing the same object store -configuration--for example, the same bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | - - -#### node-id-from-env - -Specifies the node identifier used as a prefix in all object store file paths. -Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. -This option cannot be used with the `--node-id` option. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | - -##### Example using --node-id-from-env - -```bash -export DATABASE_NODE=node0 && influxdb3 serve \ - --node-id-from-env DATABASE_NODE \ - --cluster-id cluster0 \ - --object-store file \ - --data-dir ~/.influxdb3/data -``` - ---- - -#### object-store - -Specifies which object storage to use to store Parquet files. 
-This option supports the following values: - -- `memory`: Effectively no object persistence -- `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store -- `file`: Stores objects in the local filesystem (must also set `--data-dir`) -- `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) -- `google`: Google Cloud Storage (must also set `--bucket` and `--google-service-account`) -- `azure`: Microsoft Azure blob storage (must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--object-store` | `INFLUXDB3_OBJECT_STORE` | - ---- - -#### tls-key - -The path to a key file for TLS to be enabled. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--tls-key` | `INFLUXDB3_TLS_KEY` | - ---- - -#### tls-cert - -The path to a cert file for TLS to be enabled. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--tls-cert` | `INFLUXDB3_TLS_CERT` | - ---- - -#### tls-minimum-version - -The minimum version for TLS. -Valid values are `tls-1.2` or `tls-1.3`. -Default is `tls-1.2`. - -| influxdb3 serve option | Environment variable | -| :---------------------- | :----------------------- | -| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | - ---- - -#### without-auth - -Disables authentication for all server actions (CLI commands and API requests). -The server processes all requests without requiring tokens or authentication. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| - ---- - -#### disable-authz - -Optionally disable authz by passing in a comma separated list of resources. -Valid values are `health`, `ping`, and `metrics`. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| - ---- - -### AWS - -- [aws-access-key-id](#aws-access-key-id) -- [aws-secret-access-key](#aws-secret-access-key) -- [aws-default-region](#aws-default-region) -- [aws-endpoint](#aws-endpoint) -- [aws-session-token](#aws-session-token) -- [aws-allow-http](#aws-allow-http) -- [aws-skip-signature](#aws-skip-signature) - -#### aws-access-key-id - -When using Amazon S3 as the object store, set this to an access key that has -permission to read from and write to the specified S3 bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | - ---- - -#### aws-secret-access-key - -When using Amazon S3 as the object store, set this to the secret access key that -goes with the specified access key ID. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | - ---- - -#### aws-default-region - -When using Amazon S3 as the object store, set this to the region that goes with -the specified bucket if different from the fallback value. - -**Default:** `us-east-1` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-default-region` | `AWS_DEFAULT_REGION` | - ---- - -#### aws-endpoint - -When using an Amazon S3 compatibility storage service, set this to the endpoint. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-endpoint` | `AWS_ENDPOINT` | - ---- - -#### aws-session-token - -When using Amazon S3 as an object store, set this to the session token. This is -handy when using a federated login or SSO and fetching credentials via the UI. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-session-token` | `AWS_SESSION_TOKEN` | - ---- - -#### aws-allow-http - -Allows unencrypted HTTP connections to AWS. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-allow-http` | `AWS_ALLOW_HTTP` | - ---- - -#### aws-skip-signature - -If enabled, S3 object stores do not fetch credentials and do not sign requests. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | - ---- - -### Google Cloud Service - -- [google-service-account](#google-service-account) - -#### google-service-account - -When using Google Cloud Storage as the object store, set this to the path to the -JSON file that contains the Google credentials. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :----------------------- | -| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | - ---- - -### Microsoft Azure - -- [azure-storage-account](#azure-storage-account) -- [azure-storage-access-key](#azure-storage-access-key) - -#### azure-storage-account - -When using Microsoft Azure as the object store, set this to the name you see -when navigating to **All Services > Storage accounts > `[name]`**. 
- -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | - ---- - -#### azure-storage-access-key - -When using Microsoft Azure as the object store, set this to one of the Key -values in the Storage account's **Settings > Access keys**. - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | - ---- - -### Object Storage - -- [bucket](#bucket) -- [object-store-connection-limit](#object-store-connection-limit) -- [object-store-http2-only](#object-store-http2-only) -- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) -- [object-store-max-retries](#object-store-max-retries) -- [object-store-retry-timeout](#object-store-retry-timeout) -- [object-store-cache-endpoint](#object-store-cache-endpoint) - -#### bucket - -Sets the name of the object storage bucket to use. Must also set -`--object-store` to a cloud object storage for this option to take effect. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--bucket` | `INFLUXDB3_BUCKET` | - ---- - -#### object-store-connection-limit - -When using a network-based object store, limits the number of connections to -this value. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :------------------------------ | -| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | - ---- - -#### object-store-http2-only - -Forces HTTP/2 connections to network-based object stores. 
- -| influxdb3 serve option | Environment variable | -| :-------------------------- | :------------------------ | -| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | - ---- - -#### object-store-http2-max-frame-size - -Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | - ---- - -#### object-store-max-retries - -Defines the maximum number of times to retry a request. - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | - ---- - -#### object-store-retry-timeout - -Specifies the maximum length of time from the initial request after which no -further retries are be attempted. - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------- | -| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | - ---- - -#### object-store-cache-endpoint - -Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. - -| influxdb3 serve option | Environment variable | -| :------------------------------ | :---------------------------- | -| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | - ---- - -### Logs - -- [log-filter](#log-filter) -- [log-destination](#log-destination) -- [log-format](#log-format) -- [query-log-size](#query-log-size) - -#### log-filter - -Sets the filter directive for logs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-filter` | `LOG_FILTER` | - ---- - -#### log-destination - -Specifies the destination for logs. 
- -**Default:** `stdout` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-destination` | `LOG_DESTINATION` | - ---- - -#### log-format - -Defines the message format for logs. - -This option supports the following values: - -- `full` _(default)_ - -**Default:** `full` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-format` | `LOG_FORMAT` | - ---- - -#### query-log-size - -Defines the size of the query log. Up to this many queries remain in the -log before older queries are evicted to make room for new ones. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | - ---- - -### Traces - -- [traces-exporter](#traces-exporter) -- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) -- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) -- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) -- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) -- [traces-jaeger-debug-name](#traces-jaeger-debug-name) -- [traces-jaeger-tags](#traces-jaeger-tags) -- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) - -#### traces-exporter - -Sets the type of tracing exporter. - -**Default:** `none` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--traces-exporter` | `TRACES_EXPORTER` | - ---- - -#### traces-exporter-jaeger-agent-host - -Specifies the Jaeger agent network hostname for tracing. 
- -**Default:** `0.0.0.0` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | - ---- - -#### traces-exporter-jaeger-agent-port - -Defines the Jaeger agent network port for tracing. - -**Default:** `6831` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | - ---- - -#### traces-exporter-jaeger-service-name - -Sets the Jaeger service name for tracing. - -**Default:** `iox-conductor` - -| influxdb3 serve option | Environment variable | -| :-------------------------------------- | :------------------------------------ | -| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | - ---- - -#### traces-exporter-jaeger-trace-context-header-name - -Specifies the header name used for passing trace context. - -**Default:** `uber-trace-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------------------- | :------------------------------------------------- | -| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | - ---- - -#### traces-jaeger-debug-name - -Specifies the header name used for force sampling in tracing. - -**Default:** `jaeger-debug-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------- | -| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | - ---- - -#### traces-jaeger-tags - -Defines a set of `key=value` pairs to annotate tracing spans with. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | - ---- - -#### traces-jaeger-max-msgs-per-second - -Specifies the maximum number of messages sent to a Jaeger service per second. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | - ---- - -### DataFusion - -- [datafusion-num-threads](#datafusion-num-threads) -- [datafusion-runtime-type](#datafusion-runtime-type) -- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) -- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) -- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) -- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) -- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) -- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) -- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) -- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) -- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) -- [datafusion-config](#datafusion-config) - -#### datafusion-num-threads - -Sets the maximum number of DataFusion runtime threads to use. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :--------------------------------- | -| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | - ---- - -#### datafusion-runtime-type - -Specifies the DataFusion tokio runtime type. 
- -This option supports the following values: - -- `current-thread` -- `multi-thread` _(default)_ -- `multi-thread-alt` - -**Default:** `multi-thread` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | - ---- - -#### datafusion-runtime-disable-lifo-slot - -Disables the LIFO slot of the DataFusion runtime. - -This option supports the following values: - -- `true` -- `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | - ---- - -#### datafusion-runtime-event-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -tokio runtime polls for external events--for example: timers, I/O. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :-------------------------------------------- | -| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | - ---- - -#### datafusion-runtime-global-queue-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -runtime polls the global task queue. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------- | :--------------------------------------------------- | -| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | - ---- - -#### datafusion-runtime-max-blocking-threads - -Specifies the limit for additional threads spawned by the DataFusion runtime. 
- -| influxdb3 serve option | Environment variable | -| :------------------------------------------ | :-------------------------------------------------- | -| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | - ---- - -#### datafusion-runtime-max-io-events-per-tick - -Configures the maximum number of events processed per tick by the tokio -DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | - ---- - -#### datafusion-runtime-thread-keep-alive - -Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion -runtime. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | - ---- - -#### datafusion-runtime-thread-priority - -Sets the thread priority for tokio DataFusion runtime workers. - -**Default:** `10` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | - ---- - -#### datafusion-max-parquet-fanout - -When multiple parquet files are required in a sorted way -(deduplication for example), specifies the maximum fanout. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :---------------------------------------- | -| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | - ---- - -#### datafusion-use-cached-parquet-loader - -Uses a cached parquet loader when reading parquet files from the object store. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | - ---- - -#### datafusion-config - -Provides custom configuration to DataFusion as a comma-separated list of -`key:value` pairs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | - ---- - -### HTTP - -- [max-http-request-size](#max-http-request-size) -- [http-bind](#http-bind) - -#### max-http-request-size - -Specifies the maximum size of HTTP requests. - -**Default:** `10485760` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :-------------------------------- | -| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | - ---- - -#### http-bind - -Defines the address on which InfluxDB serves HTTP API requests. - -**Default:** `0.0.0.0:8181` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | - ---- - -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
- -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - -### Memory - -- [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) -- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - -#### exec-mem-pool-bytes - -Specifies the size of memory pool used during query execution. -Can be given as absolute value in bytes or as a percentage of the total available memory--for -example: `8000000000` or `10%`). - -**Default:** `20%` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | - ---- - -#### force-snapshot-mem-threshold - - -Specifies the threshold for the internal memory buffer. Supports either a -percentage (portion of available memory) or absolute value in MB--for example: `70%` or `1000`. - -**Default:** `50%` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | - ---- - -### Write-Ahead Log (WAL) - -- [wal-flush-interval](#wal-flush-interval) -- [wal-snapshot-size](#wal-snapshot-size) -- [wal-max-write-buffer-size](#wal-max-write-buffer-size) -- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - -#### wal-flush-interval - -Specifies the interval to flush buffered data to a WAL file. Writes that wait -for WAL confirmation take up to this interval to complete. 
- -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------- | -| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | - ---- - -#### wal-snapshot-size - -Defines the number of WAL files to attempt to remove in a snapshot. This, -multiplied by the interval, determines how often snapshots are taken. - -**Default:** `600` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | - ---- - -#### wal-max-write-buffer-size - -Specifies the maximum number of write requests that can be buffered before a -flush must be executed and succeed. - -**Default:** `100000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | - ---- - -#### snapshotted-wal-files-to-keep - -Specifies the number of snapshotted WAL files to retain in the object store. -Flushing the WAL files does not clear the WAL files immediately; -they are deleted when the number of snapshotted WAL files exceeds this number. - -**Default:** `300` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :-------------------------------- | -| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | - ---- - -### Compaction - -- [compaction-row-limit](#compaction-row-limit) -- [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) -- [compaction-gen2-duration](#compaction-gen2-duration) -- [compaction-multipliers](#compaction-multipliers) -- [compaction-cleanup-wait](#compaction-cleanup-wait) -- [gen1-duration](#gen1-duration) - -#### compaction-row-limit - -Specifies the soft limit for the number of rows per file that the compactor -writes. The compactor may write more rows than this limit. 
- -**Default:** `1000000` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------------------ | -| `--compaction-row-limit` | `INFLUXDB3_ENTERPRISE_COMPACTION_ROW_LIMIT` | - ---- - -#### compaction-max-num-files-per-plan - -Sets the maximum number of files included in any compaction plan. - -**Default:** `500` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :------------------------------------------------------- | -| `--compaction-max-num-files-per-plan` | `INFLUXDB3_ENTERPRISE_COMPACTION_MAX_NUM_FILES_PER_PLAN` | - ---- - -#### compaction-gen2-duration - -Specifies the duration of the first level of compaction (gen2). Later levels of -compaction are multiples of this duration. This value should be equal to or -greater than the gen1 duration. - -**Default:** `20m` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------------------- | -| `--compaction-gen2-duration` | `INFLUXDB3_ENTERPRISE_COMPACTION_GEN2_DURATION` | - ---- - -#### compaction-multipliers - -Specifies a comma-separated list of multiples defining the duration of each -level of compaction. The number of elements in the list determines the number of -compaction levels. The first element specifies the duration of the first level -(gen3); subsequent levels are multiples of the previous level. - -**Default:** `3,4,6,5` - -| influxdb3 serve option | Environment variable | -| :------------------------- | :-------------------------------------------- | -| `--compaction-multipliers` | `INFLUXDB3_ENTERPRISE_COMPACTION_MULTIPLIERS` | - ---- - -#### compaction-cleanup-wait - -Specifies the amount of time that the compactor waits after finishing a compaction run -to delete files marked as needing deletion during that compaction run. 
- -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :--------------------------------------------- | -| `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | - -{{% show-in "enterprise" %}} - ---- - -#### compaction-check-interval - -Specifies how often the compactor checks for new compaction work to perform. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :------------------------------------------------ | -| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | - -{{% /show-in %}} - ---- - - -#### gen1-duration - -Specifies the duration that Parquet files are arranged into. Data timestamps -land each row into a file of this duration. Supported durations are `1m`, -`5m`, and `10m`. These files are known as "generation 1" files, which the -compactor can merge into larger generations. - -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------ | -| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | - ---- - -### Caching - -- [preemptive-cache-age](#preemptive-cache-age) -- [parquet-mem-cache-size](#parquet-mem-cache-size) -- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) -- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) -- [disable-parquet-mem-cache](#disable-parquet-mem-cache) -- [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) -- [last-cache-eviction-interval](#last-cache-eviction-interval) -- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - -#### preemptive-cache-age - -Specifies the interval to prefetch into the Parquet cache during compaction. 
- -**Default:** `3d` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------- | -| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | - ---- - -#### parquet-mem-cache-size - - -Specifies the size of the in-memory Parquet cache in megabytes or percentage of total available memory. - -**Default:** `20%` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--parquet-mem-cache-size` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE` | - -#### parquet-mem-cache-prune-percentage - -Specifies the percentage of entries to prune during a prune operation on the -in-memory Parquet cache. - -**Default:** `0.1` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | - ---- - -#### parquet-mem-cache-prune-interval - -Sets the interval to check if the in-memory Parquet cache needs to be pruned. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | - ---- - -#### parquet-mem-cache-query-path-duration - -A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies -the time window for caching recent Parquet files in memory. Default is `5h`. 
- -Only files containing data with a timestamp between `now` and `now - duration` -are cached when accessed during queries--for example, with the default `5h` setting: - -- Current time: `2024-06-10 15:00:00` -- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) - -If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): - -- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) -- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | - ---- - -#### disable-parquet-mem-cache - -Disables the in-memory Parquet cache. By default, the cache is enabled. - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | - ---- - -#### last-cache-eviction-interval - -Specifies the interval to evict expired entries from the Last-N-Value cache, -expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | - -{{% show-in "enterprise" %}} - ---- - -#### last-value-cache-disable-from-history - -Disables populating the last-N-value cache from historical data. -If disabled, the cache is still populated with data from the write-ahead log (WAL). 
- -| influxdb3 serve option | Environment variable | -| :---------------------------------------- | :---------------------------------------------------------- | -| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| - -{{% /show-in %}} - ---- - -#### distinct-cache-eviction-interval - -Specifies the interval to evict expired entries from the distinct value cache, -expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | - -{{% show-in "enterprise" %}} ---- - -#### distinct-value-cache-disable-from-history - -Disables populating the distinct value cache from historical data. -If disabled, the cache is still populated with data from the write-ahead log (WAL). - -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :-------------------------------------------------------------- | -| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| - -{{% /show-in %}} - ---- - -#### table-index-cache-max-entries - -Specifies the maximum number of entries in the table index cache. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------------------- | :-------------------------------------------- | -| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | - ---- - -#### table-index-cache-concurrency-limit - -Limits the concurrency level for table index cache operations. 
- -**Default:** `8` - -| influxdb3 serve option | Environment variable | -| :---------------------------------------- | :------------------------------------------------- | -| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | - ---- - -#### query-file-limit - -Limits the number of Parquet files a query can access. -If a query attempts to read more than this limit, {{% product-name %}} returns an error. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | - ---- - -### Processing Engine - -- [plugin-dir](#plugin-dir) -- [virtual-env-location](#virtual-env-location) -- [package-manager](#package-manager) - -#### plugin-dir - -Specifies the local directory that contains Python plugins and their test files. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | - ---- - -#### virtual-env-location - -Specifies the location of the Python virtual environment that the processing -engine uses. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV` | - ---- - -#### package-manager - -Specifies the Python package manager that the processing engine uses. 
- -This option supports the following values: - -- `discover` _(default)_: Automatically discover available package manager -- `pip`: Use pip package manager -- `uv`: Use uv package manager - -**Default:** `discover` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--package-manager` | `PACKAGE_MANAGER` | - -{{% show-in "enterprise" %}} - ---- - -### Cluster Management - - -- [replication-interval](#replication-interval) -- [catalog-sync-interval](#catalog-sync-interval) -- [wait-for-running-ingestor](#wait-for-running-ingestor) - -#### replication-interval - -Specifies the interval at which data replication occurs between cluster nodes. - -**Default:** `250ms` - -| influxdb3 serve option | Environment variable | -| :------------------------- | :------------------------------------------- | -| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | - ---- - -#### catalog-sync-interval - -Defines how often the catalog synchronizes across cluster nodes. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------------------------ | -| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| - ---- - -#### wait-for-running-ingestor - -Specifies how long to wait for a running ingestor during startup. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :------------------------------------------------ | -| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | - -{{% /show-in %}} - -{{% show-in "enterprise" %}} ---- - -### Resource Limits - -- [num-cores](#num-cores) -- [num-database-limit](#num-database-limit) -- [num-table-limit](#num-table-limit) -- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) - -#### num-cores - -Limits the number of CPU cores that InfluxDB Enterprise can use. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :-------------------------------- | -| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | - ---- - -#### num-database-limit - -Sets the maximum number of databases that can be created. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------------------------- | -| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | - ---- - -#### num-table-limit - -Defines the maximum number of tables that can be created across all databases. - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------------- | -| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | - ---- - -#### num-total-columns-per-table-limit - -Sets the maximum number of columns allowed per table. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :---------------------------------------------------------- | -| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | - -{{% /show-in %}} - ---- - -### Data Lifecycle Management - -- [gen1-lookback-duration](#gen1-lookback-duration) -- [retention-check-interval](#retention-check-interval) -- [delete-grace-period](#delete-grace-period) -- [hard-delete-default-duration](#hard-delete-default-duration) - -#### gen1-lookback-duration - -Specifies how far back to look when creating generation 1 Parquet files. - -**Default:** `24h` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :-------------------------------------- | -| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | - ---- - -#### retention-check-interval - -Defines how often the system checks for data that should be deleted according to retention policies. 
- -**Default:** `1h` - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------------------- | -| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | - ---- - -#### delete-grace-period - -Specifies the grace period before permanently deleting data. - -**Default:** `24h` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :--------------------------------- | -| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | - ---- - -#### hard-delete-default-duration - -Sets the default duration for hard deletion of data. - -**Default:** `90d` - -| influxdb3 serve option | Environment variable | -| :---------------------------------- | :-------------------------------------------- | -| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` | - ---- - -### WAL Advanced Options - -- [wal-replay-fail-on-error](#wal-replay-fail-on-error) -- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) - -#### wal-replay-fail-on-error - -Determines whether WAL replay should fail when encountering errors. - -**Default:** `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------------------- | -| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | - ---- - -#### wal-replay-concurrency-limit - -Sets the maximum number of concurrent WAL replay operations. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :--------------------------------- | :------------------------------------------ | -| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | - ---- - -### Telemetry - -- [telemetry-disable-upload](#telemetry-disable-upload) -- [telemetry-endpoint](#telemetry-endpoint) - -#### telemetry-disable-upload - -Disables the upload of telemetry data to InfluxData. 
- -**Default:** `false` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :-------------------------------------- | -| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | - ---- - -#### telemetry-endpoint - -Specifies the endpoint for telemetry data uploads. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------------------- | -| `--telemetry-endpoint` | `INFLUXDB3_TELEMETRY_ENDPOINT` | - ---- - -### TCP Listeners - -- [tcp-listener-file-path](#tcp-listener-file-path) -- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) - -#### tcp-listener-file-path - -Specifies the file path for the TCP listener configuration. - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :----------------------------------- | -| `--tcp-listener-file-path` | `INFLUXDB3_TCP_LISTINER_FILE_PATH` | - ---- - -#### admin-token-recovery-tcp-listener-file-path - -Specifies the TCP listener file path for admin token recovery operations. - -| influxdb3 serve option | Environment variable | -| :---------------------------------------------- | :-------------------------------------------------------- | -| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | - -{{% show-in "enterprise" %}} ---- - -### Experimental Features - -- [use-pacha-tree](#use-pacha-tree) - -#### use-pacha-tree - -Enables the experimental PachaTree storage engine for improved performance. - -> [!Warning] -> This is an experimental feature and should not be used in production environments. 
- -**Default:** `false` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------------- | -| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | -{{% /show-in %}} + \ No newline at end of file diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 287a2521d..0b5b16215 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -1515,6 +1515,7 @@ Specifies how long to wait for a running ingestor during startup. ### Resource Limits + - [num-cores](#num-cores) - [num-database-limit](#num-database-limit) - [num-table-limit](#num-table-limit) @@ -1522,7 +1523,18 @@ Specifies how long to wait for a running ingestor during startup. #### num-cores -Limits the number of CPU cores that InfluxDB Enterprise can use. +Limits the number of CPU cores that the InfluxDB 3 Enterprise process can use when running on systems where resources are shared. +When specified, InfluxDB automatically assigns the number of DataFusion threads and IO threads based on the core count. 
+ +**Thread assignment logic:** +- **1-2 cores**: 1 IO thread, 1 DataFusion thread +- **3 cores**: 1 IO thread, 2 DataFusion threads +- **4+ cores**: 2 IO threads, (n-2) DataFusion threads + +**Constraints:** +- Must be at least 2 +- Cannot exceed the number of cores available on the system +- Total thread count from other thread options cannot exceed the `num-cores` value | influxdb3 serve option | Environment variable | | :--------------------- | :-------------------------------- | From cf8cd90871bd773292c2772ead7da4e34c914e93 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 6 Aug 2025 12:51:02 -0500 Subject: [PATCH 06/13] Update content/shared/influxdb3-cli/config-options.md Co-authored-by: Scott Anderson --- content/shared/influxdb3-cli/config-options.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 0b5b16215..268b3ac93 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -27,9 +27,9 @@ influxdb3 serve \ ```sh -{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com{{% /show-in %}} -{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0{{% /show-in %}} -export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com +export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0 +{{% /show-in %}}export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node export INFLUXDB3_OBJECT_STORE=file export INFLUXDB3_DB_DIR=~/.influxdb3 export LOG_FILTER=info From 99c9512a08ca101569e78e27c39e14d7b2b680b2 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 8 Aug 2025 08:50:35 -0600 Subject: [PATCH 07/13] add influxql integral function (#6236) Co-authored-by: Jason Stirnaman --- .../influxql-v3-reference/feature-support.md | 20 +++++++---------- .../functions/aggregates.md | 
22 +++++-------------- 2 files changed, 14 insertions(+), 28 deletions(-) diff --git a/content/shared/influxql-v3-reference/feature-support.md b/content/shared/influxql-v3-reference/feature-support.md index feacf93fa..3fe18e3a1 100644 --- a/content/shared/influxql-v3-reference/feature-support.md +++ b/content/shared/influxql-v3-reference/feature-support.md @@ -65,11 +65,11 @@ The following table provides information about what metaqueries are available in ### Aggregate functions -| Function | Supported | -| :---------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| :-------------------------------------------------------------------------------- | :----------------------: | | [COUNT()](/influxdb/version/reference/influxql/functions/aggregates/#count) | **{{< icon "check" >}}** | | [DISTINCT()](/influxdb/version/reference/influxql/functions/aggregates/#distinct) | **{{< icon "check" >}}** | -| INTEGRAL() | | +| [INTEGRAL()](/influxdb/version/reference/influxql/functions/aggregates/#integral) | **{{< icon "check" >}}** | | [MEAN()](/influxdb/version/reference/influxql/functions/aggregates/#mean) | **{{< icon "check" >}}** | | [MEDIAN()](/influxdb/version/reference/influxql/functions/aggregates/#median) | **{{< icon "check" >}}** | | [MODE()](/influxdb/version/reference/influxql/functions/aggregates/#mode) | **{{< icon "check" >}}** | @@ -77,29 +77,25 @@ The following table provides information about what metaqueries are available in | [STDDEV()](/influxdb/version/reference/influxql/functions/aggregates/#stddev) | **{{< icon "check" >}}** | | [SUM()](/influxdb/version/reference/influxql/functions/aggregates/#sum) | **{{< icon "check" >}}** | - - ### Selector functions -| Function | Supported | -| :------------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| 
:----------------------------------------------------------------------------------- | :----------------------: | | [BOTTOM()](/influxdb/version/reference/influxql/functions/selectors/#bottom) | **{{< icon "check" >}}** | | [FIRST()](/influxdb/version/reference/influxql/functions/selectors/#first) | **{{< icon "check" >}}** | | [LAST()](/influxdb/version/reference/influxql/functions/selectors/#last) | **{{< icon "check" >}}** | | [MAX()](/influxdb/version/reference/influxql/functions/selectors/#max) | **{{< icon "check" >}}** | | [MIN()](/influxdb/version/reference/influxql/functions/selectors/#min) | **{{< icon "check" >}}** | | [PERCENTILE()](/influxdb/version/reference/influxql/functions/selectors/#percentile) | **{{< icon "check" >}}** | -| SAMPLE() | | +| SAMPLE() | | | [TOP()](/influxdb/version/reference/influxql/functions/selectors/#top) | **{{< icon "check" >}}** | ### Transformations -| Function | Supported | -| :--------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| :------------------------------------------------------------------------------------------------------------------- | :----------------------: | | [ABS()](/influxdb/version/reference/influxql/functions/transformations/#abs) | **{{< icon "check" >}}** | | [ACOS()](/influxdb/version/reference/influxql/functions/transformations/#acos) | **{{< icon "check" >}}** | | [ASIN()](/influxdb/version/reference/influxql/functions/transformations/#asin) | **{{< icon "check" >}}** | diff --git a/content/shared/influxql-v3-reference/functions/aggregates.md b/content/shared/influxql-v3-reference/functions/aggregates.md index c4c2ecc4e..071c65d06 100644 --- a/content/shared/influxql-v3-reference/functions/aggregates.md +++ b/content/shared/influxql-v3-reference/functions/aggregates.md @@ -6,6 +6,7 @@ _Examples use the sample data set provided in the - [COUNT()](#count) - 
[DISTINCT()](#distinct) +- [INTEGRAL()](#integral) - [MEAN()](#mean) - [MEDIAN()](#median) - [MODE()](#mode) @@ -13,17 +14,6 @@ _Examples use the sample data set provided in the - [STDDEV()](#stddev) - [SUM()](#sum) - - - -> [!Important] -> #### Missing InfluxQL functions -> -> Some InfluxQL functions are in the process of being rearchitected to work with -> the InfluxDB 3 storage engine. If a function you need is not here, check the -> [InfluxQL feature support page](/influxdb/version/reference/influxql/feature-support/#function-support) -> for more information. - ## COUNT() Returns the number of non-null [field values](/influxdb/version/reference/glossary/#field-value). @@ -186,14 +176,14 @@ name: home {{% /expand %}} {{< /expand-wrapper >}} - +{{< /expand-wrapper >}} ## MEAN() From e506402aa10efc017c2a8438a26a73aaf1226a27 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 11 Aug 2025 12:11:12 -0500 Subject: [PATCH 08/13] chore(influxdb3-distrib): Recommend best practices for adjusting client timeouts when querying InfluxDB 3 distributed. 
- Closes influxdata/dar#526 - Adds Python client examples - Adds influxctl examples Co-authored-by: Reid Kaufmann --- .../client-libraries/python.md | 11 +- .../execute-queries/influxctl-cli.md | 31 +- .../query-timeout-best-practices.md | 10 + .../troubleshoot-and-optimize/troubleshoot.md | 5 +- .../client-libraries/python.md | 10 +- .../query-timeout-best-practices.md | 11 + .../troubleshoot-and-optimize/troubleshoot.md | 5 +- .../client-libraries/python.md | 10 +- .../execute-queries/influxctl-cli.md | 32 +- .../query-timeout-best-practices.md | 11 + .../troubleshoot-and-optimize/troubleshoot.md | 5 +- .../query-timeout-best-practices.md | 278 ++++++++++++++++++ 12 files changed, 405 insertions(+), 14 deletions(-) create mode 100644 content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md create mode 100644 content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md create mode 100644 content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md create mode 100644 content/shared/influxdb3-query-guides/query-timeout-best-practices.md diff --git a/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md index ad173145e..0ae8e82cb 100644 --- a/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md @@ -26,6 +26,7 @@ related: - /influxdb3/cloud-dedicated/reference/influxql/ - /influxdb3/cloud-dedicated/reference/sql/ - /influxdb3/cloud-dedicated/query-data/execute-queries/troubleshoot/ + - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -240,7 +241,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( 
host='{{< influxdb/host >}}', token='DATABASE_TOKEN', - database='DATABASE_NAME' + database='DATABASE_NAME', + timeout=60 # Set default timeout to 60 seconds ) ``` {{% /code-placeholders %}} @@ -275,6 +277,7 @@ client = InfluxDBClient3( host="{{< influxdb/host >}}", token='DATABASE_TOKEN', database='DATABASE_NAME', +timeout=60, # Set default timeout to 60 seconds flight_client_options=flight_client_options( tls_root_certs=cert)) ... @@ -332,7 +335,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") @@ -377,7 +381,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md b/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md index 92d886474..446f3d73e 100644 --- a/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md +++ b/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md @@ -13,6 +13,7 @@ influxdb3/cloud-dedicated/tags: [query, sql, influxql, influxctl, CLI] related: - /influxdb3/cloud-dedicated/reference/cli/influxctl/query/ - /influxdb3/cloud-dedicated/get-started/query/#execute-an-sql-query, Get started querying data + - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices - /influxdb3/cloud-dedicated/reference/sql/ - /influxdb3/cloud-dedicated/reference/influxql/ list_code_example: | @@ -142,6 +143,34 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% 
/code-placeholder-key %}}: Name of the database to query +## Query timeouts + +The [`influxctl --timeout` global flag](/influxdb3/cloud-dedicated/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests. +If a query takes longer than the specified timeout, the operation will be canceled. + +### Timeout examples + +Use different timeout values based on your query type: + +{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} +```sh +# Shorter timeout for testing dashboard queries (10 seconds) +influxctl query \ + --timeout 10s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '1 day'" + +# Longer timeout for analytical queries (5 minutes) +influxctl query \ + --timeout 5m \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room" +``` +{{% /code-placeholders %}} + +For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/). 
## Output format @@ -243,7 +272,7 @@ influxctl query \ {{% /influxdb/custom-timestamps %}} {{< expand-wrapper >}} -{{% expand "View example results with unix nanosecond timestamps" %}} +{{% expand "View example results with Unix nanosecond timestamps" %}} {{% influxdb/custom-timestamps %}} ``` +-------+--------+---------+------+---------------------+ diff --git a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..b2608f4a4 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,10 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. +menu: + influxdb3_cloud_dedicated: + name: Query timeout best practices + parent: Troubleshoot and optimize queries +weight: 205 +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/troubleshoot.md b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/troubleshoot.md index 70ba8dd15..bd89d15e7 100644 --- a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/troubleshoot.md +++ b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/troubleshoot.md @@ -12,6 +12,7 @@ related: - /influxdb3/cloud-dedicated/query-data/sql/ - /influxdb3/cloud-dedicated/query-data/influxql/ - /influxdb3/cloud-dedicated/reference/client-libraries/v3/ + - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ aliases: - /influxdb3/cloud-dedicated/query-data/execute-queries/troubleshoot/ - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/trace/ @@ -30,7 +31,9 
@@ If a query doesn't return any data, it might be due to the following: - Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day. - The query (InfluxDB server) timed out. -- The query client timed out. +- The query client timed out. + See [Query timeout best practices](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/) + for guidance on setting appropriate timeouts. - The query return type is not supported by the client library. For example, array or list types may not be supported. In this case, use `array_to_string()` to convert the array value to a string--for example: diff --git a/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md index cd2545135..da203588d 100644 --- a/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md @@ -27,6 +27,7 @@ related: - /influxdb3/cloud-serverless/reference/influxql/ - /influxdb3/cloud-serverless/reference/sql/ - /influxdb3/cloud-serverless/query-data/execute-queries/troubleshoot/ + - /influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -241,7 +242,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( host='{{< influxdb/host >}}', token='API_TOKEN', - database='BUCKET_NAME' + database='BUCKET_NAME', + timeout=30 # Set default timeout to 30 seconds for serverless ) ``` {{% /code-placeholders %}} @@ -332,7 +334,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=10 # Override default timeout for simple queries (10 
seconds) ) print("\n#### View Schema information\n") @@ -377,7 +380,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=10 # Override default timeout for simple queries (10 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..f836c2843 --- /dev/null +++ b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,11 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. +menu: + influxdb3_cloud_serverless: + name: Query timeout best practices + parent: Troubleshoot and optimize queries + identifier: query-timeout-best-practices +weight: 201 +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- \ No newline at end of file diff --git a/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/troubleshoot.md b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/troubleshoot.md index 5c919861a..31edbebb9 100644 --- a/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/troubleshoot.md +++ b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/troubleshoot.md @@ -12,6 +12,7 @@ related: - /influxdb3/cloud-serverless/query-data/sql/ - /influxdb3/cloud-serverless/query-data/influxql/ - /influxdb3/cloud-serverless/reference/client-libraries/v3/ + - /influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ aliases: - /influxdb3/cloud-serverless/query-data/execute-queries/troubleshoot/ --- @@ -29,7 +30,9 @@ If a 
query doesn't return any data, it might be due to the following: - Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day. - The query (InfluxDB server) timed out. -- The query client timed out. +- The query client timed out. + See [Query timeout best practices](/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/) + for guidance on setting appropriate timeouts. - The query return type is not supported by the client library. For example, array or list types may not be supported. In this case, use `array_to_string()` to convert the array value to a string--for example: diff --git a/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md index 444e109d2..c64e2f681 100644 --- a/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md @@ -20,6 +20,7 @@ related: - /influxdb3/clustered/query-data/sql/ - /influxdb3/clustered/reference/influxql/ - /influxdb3/clustered/reference/sql/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -234,7 +235,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( host='{{< influxdb/host >}}', token='DATABASE_TOKEN', - database='DATABASE_NAME' + database='DATABASE_NAME', + timeout=60 # Set default timeout to 60 seconds ) ``` {{% /code-placeholders %}} @@ -325,7 +327,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") @@ -370,7 +373,8 @@ client = 
InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md b/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md index d218f03a1..f3f19e2aa 100644 --- a/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md +++ b/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md @@ -12,6 +12,7 @@ influxdb3/clustered/tags: [query, sql, influxql, influxctl, CLI] related: - /influxdb3/clustered/reference/cli/influxctl/query/ - /influxdb3/clustered/get-started/query/#execute-an-sql-query, Get started querying data + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices - /influxdb3/clustered/reference/sql/ - /influxdb3/clustered/reference/influxql/ list_code_example: | @@ -141,6 +142,35 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query +## Query timeouts + +The [`influxctl --timeout` global flag](/influxdb3/clustered/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests. +If a query takes longer than the specified timeout, the operation will be canceled. 
+ +### Timeout examples + +Use different timeout values based on your query type: + +{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} +```sh +# Shorter timeout for testing dashboard queries (10 seconds) +influxctl query \ + --timeout 10s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT * FROM sensors WHERE time >= now() - INTERVAL '1 hour' LIMIT 100" + +# Longer timeout for analytical queries (5 minutes) +influxctl query \ + --timeout 300s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room" +``` +{{% /code-placeholders %}} + +For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/). + ## Output format The `influxctl query` command supports the following output formats: @@ -241,7 +271,7 @@ influxctl query \ {{% /influxdb/custom-timestamps %}} {{< expand-wrapper >}} -{{% expand "View example results with unix nanosecond timestamps" %}} +{{% expand "View example results with Unix nanosecond timestamps" %}} {{% influxdb/custom-timestamps %}} ``` +-------+--------+---------+------+---------------------+ diff --git a/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..55efe064a --- /dev/null +++ b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,11 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. 
+menu: + influxdb3_clustered: + name: Query timeout best practices + parent: Troubleshoot and optimize queries + identifier: query-timeout-best-practices +weight: 201 +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- \ No newline at end of file diff --git a/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/troubleshoot.md b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/troubleshoot.md index 2ba49d45b..c3a97fad3 100644 --- a/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/troubleshoot.md +++ b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/troubleshoot.md @@ -12,6 +12,7 @@ related: - /influxdb3/clustered/query-data/sql/ - /influxdb3/clustered/query-data/influxql/ - /influxdb3/clustered/reference/client-libraries/v3/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ aliases: - /influxdb3/clustered/query-data/execute-queries/troubleshoot/ --- @@ -29,7 +30,9 @@ If a query doesn't return any data, it might be due to the following: - Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day. - The query (InfluxDB server) timed out. -- The query client timed out. +- The query client timed out. + See [Query timeout best practices](/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/) + for guidance on setting appropriate timeouts. - The query return type is not supported by the client library. For example, array or list types may not be supported. 
In this case, use `array_to_string()` to convert the array value to a string--for example: diff --git a/content/shared/influxdb3-query-guides/query-timeout-best-practices.md b/content/shared/influxdb3-query-guides/query-timeout-best-practices.md new file mode 100644 index 000000000..dbafb53b6 --- /dev/null +++ b/content/shared/influxdb3-query-guides/query-timeout-best-practices.md @@ -0,0 +1,278 @@ +Learn how to set appropriate query timeouts for InfluxDB 3 to balance performance and resource protection. + +Query timeouts prevent resource monopolization while allowing legitimate queries to complete successfully. +The key is finding the "goldilocks zone"—timeouts that are not too short (causing legitimate queries to fail) and not too long (allowing runaway queries to monopolize resources). + +- [Understanding query timeouts](#understanding-query-timeouts) +- [How query routing affects timeout strategy](#how-query-routing-affects-timeout-strategy) +- [Timeout configuration best practices](#timeout-configuration-best-practices) +- [InfluxDB 3 client library examples](#influxdb-3-client-library-examples) +- [Monitoring and troubleshooting](#monitoring-and-troubleshooting) + +## Understanding query timeouts + +Query timeouts define the maximum duration a query can run before being canceled. +In {{% product-name %}}, timeouts serve multiple purposes: + +- **Resource protection**: Prevent runaway queries from monopolizing system resources +- **Performance optimization**: Ensure responsive system behavior for time-sensitive operations +- **Cost control**: Limit compute resource consumption +- **User experience**: Provide predictable response times for applications and dashboards + +Query execution includes network latency, query planning, data retrieval, processing, and result serialization. 
+ +### The "goldilocks zone" for query timeouts + +Optimal timeouts are: +- **Long enough**: To accommodate normal query execution under typical load +- **Short enough**: To prevent resource monopolization and provide reasonable feedback +- **Adaptive**: Adjusted based on query type, system load, and historical performance + +## How query routing affects timeout strategy + +InfluxDB 3 uses round-robin or load-balanced query routing across multiple replicas or nodes. +This creates a "checkout line" effect that influences timeout strategy. + +### The checkout line analogy + +Consider a grocery store with multiple checkout lines: +- Customers (queries) are distributed across lines (replicas/nodes) +- A slow customer (long-running query) can block others in the same line +- More checkout lines (replicas) provide more alternatives when retrying + +### Noisy neighbor effects + +In distributed systems: +- A single long-running query can impact other queries on the same node +- Shorter timeouts with retries can help queries find less congested nodes +- The effectiveness depends on the number of available replicas + +### When shorter timeouts help + +- **Multiple replicas available**: Retries can find less congested nodes +- **Uneven load distribution**: Some nodes may be significantly less busy +- **Temporary congestion**: Brief spikes in query load or resource usage + +### When shorter timeouts hurt + +- **Few replicas**: Limited alternatives for retries +- **System-wide congestion**: All nodes are equally busy +- **Expensive query planning**: High overhead for query preparation + +## Timeout configuration best practices + +### Make timeouts adjustable + +Configure timeouts that can be modified without service restarts using environment variables, configuration files, runtime APIs, or per-query overrides. + +### Use tiered timeout strategies + +Implement different timeout classes based on query characteristics. 
+ +#### Starting point recommendations + +{{% hide-in "cloud-serverless" %}} +| Query Type | Recommended Timeout | Use Case | Rationale | +|------------|-------------------|-----------|-----------| +| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback | +| Generic default | 60 seconds | Application queries, APIs | Balances performance and reliability | +| Mixed workload | 2 minutes | Development, testing environments | Accommodates various query types | +| Analytical and background | 5 minutes | Reports, batch processing, ETL operations | Complex queries need more time | +{{% /hide-in %}} + +{{% show-in "cloud-serverless" %}} +| Query Type | Recommended Timeout | Use Case | Rationale | +|------------|-------------------|-----------|-----------| +| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback | +| Generic default | 30 seconds | Application queries, APIs | Serverless optimized for shorter queries | +| Mixed workload | 60 seconds | Development, testing environments | Limited by serverless execution model | +| Analytical and background | 2 minutes | Reports, batch processing | Complex queries within serverless limits | +{{% /show-in %}} + + +### Implement progressive timeout and retry logic + +Consider using more sophisticated retry strategies rather than simple fixed retries: + +1. **Exponential backoff**: Increase delay between retry attempts +2. **Jitter**: Add randomness to prevent thundering herd effects +3. **Circuit breakers**: Stop retries when system is overloaded +4. 
**Deadline propagation**: Respect overall operation deadlines + +### Warning signs + +Consider these indicators that timeouts may need adjustment: + +- **Timeouts > 10 minutes**: Usually indicates [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) opportunities +- **High retry rates**: May indicate timeouts are too aggressive +- **Resource utilization spikes**: Long-running queries may need shorter timeouts +- **User complaints**: Balance between performance and user experience + +### Environment-specific considerations + +- **Development**: Use longer timeouts for debugging +- **Production**: Use shorter timeouts with monitoring +- **Cost-sensitive**: Use aggressive timeouts and [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) + + +## InfluxDB 3 client library examples + +### Python client with timeout configuration + +Configure timeouts in the InfluxDB 3 Python client: + +{{% code-placeholders "DATABASE_NAME|HOST_URL|AUTH_TOKEN" %}} +```python +import influxdb_client_3 as InfluxDBClient3 + +# Configure different timeout classes (in seconds) +ui_timeout = 10 # For dashboard queries +api_timeout = 60 # For application queries +batch_timeout = 300 # For analytical queries + +# Create client with default timeout +client = InfluxDBClient3.InfluxDBClient3( + host="https://{{< influxdb/host >}}", + database="DATABASE_NAME", + token="AUTH_TOKEN", + timeout=api_timeout # Python client uses seconds +) + +# Quick query with short timeout +def query_latest_data(): + try: + result = client.query( + query="SELECT * FROM sensors WHERE time >= now() - INTERVAL '5 minutes' ORDER BY time DESC LIMIT 10", + timeout=ui_timeout + ) + return result.to_pandas() + except Exception as e: + print(f"Quick query failed: {e}") + return None + +# Analytical query with longer timeout +def query_daily_averages(): + query = """ + SELECT + DATE_TRUNC('day', time) as day, + room, + AVG(temperature) as 
avg_temp, + COUNT(*) as readings + FROM sensors + WHERE time >= now() - INTERVAL '30 days' + GROUP BY DATE_TRUNC('day', time), room + ORDER BY day DESC, room + """ + + try: + result = client.query( + query=query, + timeout=batch_timeout + ) + return result.to_pandas() + except Exception as e: + print(f"Analytical query failed: {e}") + return None +``` +{{% /code-placeholders %}} + +Replace the following: + +{{% hide-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}} +{{% show-in "clustered,cloud-dedicated" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database token](/influxdb3/version/admin/tokens/#database-tokens) with _read_ access to the specified database.{{% /show-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/version/admin/tokens/) with _read_ access to the specified bucket.{{% /show-in %}} +{{% show-in "enterprise,core" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}} with read permissions on the specified database.{{% /show-in %}} + +### Basic retry logic implementation + +Implement simple retry strategies with progressive timeouts: + +```python +import time +import influxdb_client_3 as InfluxDBClient3 + +def query_with_retry(client, query: str, initial_timeout: int = 60, max_retries: int = 2): + """Execute query with basic retry and progressive timeout increase""" + + for attempt in range(max_retries + 1): + # Progressive timeout: increase timeout on each retry + timeout_seconds = initial_timeout + attempt * 30 + + try: + result = client.query( + query=query, + timeout=timeout_seconds + ) + return
result + + except Exception as e: + if attempt == max_retries: + print(f"Query failed after {max_retries + 1} attempts: {e}") + raise + + # Simple backoff delay + delay = 2 * (attempt + 1) + print(f"Query attempt {attempt + 1} failed: {e}") + print(f"Retrying in {delay} seconds with timeout {timeout_seconds}s...") + time.sleep(delay) + + return None + +# Usage example +result = query_with_retry( + client=client, + query="SELECT * FROM large_table WHERE time >= now() - INTERVAL '1 day'", + initial_timeout=60, + max_retries=2 +) +``` + +## Monitoring and troubleshooting + +### Key metrics to monitor + +Track these essential timeout-related metrics: + +- **Query duration percentiles**: P50, P95, P99 execution times +- **Timeout rate**: Percentage of queries that exceed timeout limits +- **Error rates**: Timeout errors vs. other failure types +- **Resource utilization**: CPU and memory usage during query execution + +### Common timeout issues + +#### High timeout rates + +**Symptoms**: Many queries exceeding timeout limits + +**Common causes**: +- Timeouts set too aggressively for query complexity +- System resource constraints +- Inefficient query patterns + +**Solutions**: +1. Analyze query performance patterns +2. [Optimize slow queries](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) or increase timeouts appropriately +3. Scale system resources or add query result caching + +#### Inconsistent query performance + +**Symptoms**: Same queries sometimes fast, sometimes timeout + +**Common causes**: +- Uneven load distribution across nodes +- Resource contention from concurrent queries +- Background maintenance operations + +**Solutions**: +1. Implement load balancing or query queuing +2. Schedule maintenance during off-peak hours +3. Add query result caching for frequently accessed data + +> [!Note] +> Regular analysis of timeout patterns helps identify optimization opportunities and system scaling needs. 
\ No newline at end of file From eb3dc0b0ff3b22c71e1a23196de4b9a946fbb4ac Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 11 Aug 2025 15:51:10 -0500 Subject: [PATCH 09/13] improve and enhance write data troubleshooting for influxdb3 distributed - Update clustered, cloud-dedicated, and cloud-serverless to share enhanced troubleshooting content - Add comprehensive error capturing instructions with curl examples - Include client library debug logging samples (Python, Go, Java, JavaScript) - Provide detailed guidelines for reporting persistent write issues to support - Split HTTP status code tables by product type for clarity This standardizes troubleshooting across all InfluxDB 3 distributed editions. Closes influxdata/DAR#522 --- .../write-data/troubleshoot.md | 96 +---- .../write-data/troubleshoot.md | 98 +---- .../clustered/write-data/troubleshoot.md | 72 +--- .../troubleshoot-distributed.md | 352 ++++++++++++++++++ 4 files changed, 367 insertions(+), 251 deletions(-) create mode 100644 content/shared/influxdb3-write-guides/troubleshoot-distributed.md diff --git a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md index 16dc7ad69..1b22a235c 100644 --- a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md +++ b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md @@ -12,99 +12,13 @@ menu: parent: Write data influxdb3/cloud-dedicated/tags: [write, line protocol, errors] related: + - /influxdb3/cloud-dedicated/get-started/write/ - /influxdb3/cloud-dedicated/reference/syntax/line-protocol/ - /influxdb3/cloud-dedicated/write-data/best-practices/ - /influxdb3/cloud-dedicated/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot-distributed.md --- -Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}. 
- -- [Handle write responses](#handle-write-responses) - - [Review HTTP status codes](#review-http-status-codes) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -## Handle write responses - -{{% product-name %}} does the following when you send a write request: - - 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb3/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). - 3. Ingests or rejects data in the batch and returns one of the following HTTP status codes: - - - `204 No Content`: All data in the batch is ingested. - - `400 Bad Request`: Some (_when **partial writes** are configured for the cluster_) or all of the data has been rejected. Data that has not been rejected is ingested and queryable. - - The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points. - - Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. - - To ensure that InfluxDB handles writes in the order you request them, - wait for the response before you send the next request. - -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. -The `message` property of the response body may contain additional details about the error. 
-{{< product-name >}} returns one the following HTTP status codes for a write request: - -| HTTP response code | Response body | Description | -|:------------------------------|:------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `204 No Content"` | no response body | If InfluxDB ingested all of the data in the batch | -| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some (_when **partial writes** are configured for the cluster_) or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/cloud-dedicated/admin/tokens/) doesn't have [permission](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/cloud-dedicated/get-started/write/#write-line-protocol-to-influxdb) in write requests. | -| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | -| `422 "Unprocessable Entity"` | `message` contains details about the error | If the data isn't allowed (for example, falls outside of the database’s retention period). 
-| `500 "Internal server error"` | | Default status for an error | -| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. - -The `message` property of the response body may contain additional details about the error. -If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). - -## Troubleshoot failures - -If you notice data is missing in your database, do the following: - -- Check the [HTTP status code](#review-http-status-codes) in the response. -- Check the `message` property in the response body for details about the error. -- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/)). -- Verify the timestamps in your data match the [precision parameter](/influxdb3/cloud-dedicated/reference/glossary/#precision) in your request. -- Minimize payload size and network errors by [optimizing writes](/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes/). - -## Troubleshoot rejected points - -When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. -If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: - -- `code`: `"invalid"` -- `line`: the line number of the _first_ rejected point in the batch. -- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. 
- -InfluxDB rejects points for the following reasons: - -- a line protocol parsing error -- an invalid timestamp -- a schema conflict - -Schema conflicts occur when you try to write data that contains any of the following: - -- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing bucket data and contains a different data type for an existing field -- a tag and a field that use the same key - -### Example - -The following example shows a response body for a write request that contains two rejected points: - -```json -{ - "code": "invalid", - "line": 2, - "message": "failed to parse line protocol: - errors encountered on line(s): - error parsing line 2 (1-based): Invalid measurement was provided - error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'" -} -``` - -Check for [field data type](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an `int` field? 
+ \ No newline at end of file diff --git a/content/influxdb3/cloud-serverless/write-data/troubleshoot.md b/content/influxdb3/cloud-serverless/write-data/troubleshoot.md index b4829cd78..36bbc2e40 100644 --- a/content/influxdb3/cloud-serverless/write-data/troubleshoot.md +++ b/content/influxdb3/cloud-serverless/write-data/troubleshoot.md @@ -12,101 +12,13 @@ menu: parent: Write data influxdb3/cloud-serverless/tags: [write, line protocol, errors] related: + - /influxdb3/cloud-serverless/get-started/write/ - /influxdb3/cloud-serverless/reference/syntax/line-protocol/ - /influxdb3/cloud-serverless/write-data/best-practices/ - /influxdb3/cloud-serverless/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot-distributed.md --- -Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}. - - -- [Handle write responses](#handle-write-responses) - - [Review HTTP status codes](#review-http-status-codes) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -## Handle write responses - -{{% product-name %}} does the following when you send a write request: - - 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb3/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). - 3. Ingests or rejects data from the batch and returns one of the following HTTP status codes: - - - `204 No Content`: All of the data is ingested and queryable. - - `400 Bad Request`: Some or all of the data has been rejected. Data that has not been rejected is ingested and queryable. - - The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points. - - Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. 
- - To ensure that InfluxDB handles writes in the order you request them, - wait for the response before you send the next request. - -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. -The `message` property of the response body may contain additional details about the error. -{{< product-name >}} returns one the following HTTP status codes for a write request: - -| HTTP response code | Response body | Description | -| :-------------------------------| :--------------------------------------------------------------- | :------------- | -| `204 "No Content"` | no response body | If InfluxDB ingested all of the data in the batch | -| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/cloud-serverless/admin/tokens/) doesn't have [permission](/influxdb3/cloud-serverless/admin/tokens/create-token/) to write to the bucket. See [examples using credentials](/influxdb3/cloud-serverless/get-started/write/#write-line-protocol-to-influxdb) in write requests. 
| -| `404 "Not found"` | requested **resource type** (for example, "organization" or "bucket"), and **resource name** | If a requested resource (for example, organization or bucket) wasn't found | -| `413 “Request too large”` | cannot read data: points in batch is too large | If a request exceeds the maximum [global limit](/influxdb3/cloud-serverless/admin/billing/limits/) | -| `429 “Too many requests”` | | If the number of requests exceeds the [adjustable service quota](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas). The `Retry-After` header contains the number of seconds to wait before trying the write again. | If a request exceeds your plan's [adjustable service quotas](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas) -| `500 "Internal server error"` | | Default status for an error | -| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. - -The `message` property of the response body may contain additional details about the error. -If your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). - -## Troubleshoot failures - -If you notice data is missing in your database, do the following: - -- Check the [HTTP status code](#review-http-status-codes) in the response. -- Check the `message` property in the response body for details about the error. -- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb3/cloud-serverless/reference/syntax/line-protocol/)). -- Verify the timestamps in your data match the [precision parameter](/influxdb3/cloud-serverless/reference/glossary/#precision) in your request. 
-- Minimize payload size and network errors by [optimizing writes](/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes/). - -## Troubleshoot rejected points - -When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. -If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: - -- `code`: `"invalid"` -- `line`: the line number of the _first_ rejected point in the batch. -- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. - -InfluxDB rejects points for the following reasons: - -- a line protocol parsing error -- an invalid timestamp -- a schema conflict - -Schema conflicts occur when you try to write data that contains any of the following: - -- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing bucket data and contains a different data type for an existing field -- a tag and a field that use the same key - -### Example - -The following example shows a response body for a write request that contains two rejected points: - -```json -{ - "code": "invalid", - "line": 2, - "message": "failed to parse line protocol: - errors encountered on line(s): - error parsing line 2 (1-based): Invalid measurement was provided - error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'" -} -``` - -Check for [field data type](/influxdb3/cloud-serverless/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an `int` field? 
+ \ No newline at end of file diff --git a/content/influxdb3/clustered/write-data/troubleshoot.md b/content/influxdb3/clustered/write-data/troubleshoot.md index 1dc7b94d0..8457208a5 100644 --- a/content/influxdb3/clustered/write-data/troubleshoot.md +++ b/content/influxdb3/clustered/write-data/troubleshoot.md @@ -13,75 +13,13 @@ menu: parent: Write data influxdb3/clustered/tags: [write, line protocol, errors] related: + - /influxdb3/clustered/get-started/write/ - /influxdb3/clustered/reference/syntax/line-protocol/ - /influxdb3/clustered/write-data/best-practices/ - /influxdb3/clustered/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot-distributed.md --- -Learn how to avoid unexpected results and recover from errors when writing to -{{% product-name %}}. - -- [Handle write responses](#handle-write-responses) - - [Review HTTP status codes](#review-http-status-codes) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -## Handle write responses - -{{% product-name %}} does the following when you send a write request: - -1. Validates the request. -2. If successful, attempts to ingest data from the request body; otherwise, - responds with an [error status](#review-http-status-codes). -3. Ingests or rejects data in the batch and returns one of the following HTTP - status codes: - - - `204 No Content`: All data in the batch is ingested. - - `400 Bad Request`: Some or all of the data has been rejected. - Data that has not been rejected is ingested and queryable. - -The response body contains error details about -[rejected points](#troubleshoot-rejected-points), up to 100 points. - -Writes are synchronous--the response status indicates the final status of the -write and all ingested data is queryable. - -To ensure that InfluxDB handles writes in the order you request them, -wait for the response before you send the next request. 
- -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. -The `message` property of the response body may contain additional details about the error. -Write requests return the following status codes: - -| HTTP response code | Message | Description | -| :-------------------------------| :--------------------------------------------------------------- | :------------- | -| `204 "Success"` | | If InfluxDB ingested the data | -| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/clustered/admin/tokens/) doesn't have [permission](/influxdb3/clustered/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/clustered/get-started/write/#write-line-protocol-to-influxdb) in write requests. | -| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | -| `500 "Internal server error"` | | Default status for an error | -| `503` "Service unavailable" | | If the server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. - -If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). - -## Troubleshoot failures - -If you notice data is missing in your database, do the following: - -- Check the `message` property in the response body for details about the error. 
-- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb3/clustered/reference/syntax/line-protocol/)). -- Verify the timestamps in your data match the [precision parameter](/influxdb3/clustered/reference/glossary/#precision) in your request. -- Minimize payload size and network errors by [optimizing writes](/influxdb3/clustered/write-data/best-practices/optimize-writes/). - -## Troubleshoot rejected points - -InfluxDB rejects points that fall within the same partition (default partitioning -is by measurement and day) as existing bucket data and have a different data type -for an existing field. - -Check for [field data type](/influxdb3/clustered/reference/syntax/line-protocol/#data-types-and-format) -differences between the rejected data point and points within the same database -and partition--for example, did you attempt to write `string` data to an `int` field? + \ No newline at end of file diff --git a/content/shared/influxdb3-write-guides/troubleshoot-distributed.md b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md new file mode 100644 index 000000000..6bceb7734 --- /dev/null +++ b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md @@ -0,0 +1,352 @@ +Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}. + +- [Handle write responses](#handle-write-responses) + - [Review HTTP status codes](#review-http-status-codes) +- [Troubleshoot failures](#troubleshoot-failures) +- [Troubleshoot rejected points](#troubleshoot-rejected-points) +- [Report write issues](#report-write-issues) + +## Handle write responses + +{{% product-name %}} does the following when you send a write request: + +1. Validates the request. +2. 
If successful, attempts to [ingest data](/influxdb3/version/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). +3. Ingests or rejects data from the batch and returns one of the following HTTP status codes: + + - `204 No Content`: All of the data is ingested and queryable. + - `400 Bad Request`: Some {{% show-in "cloud-dedicated,clustered" %}}(_when **partial writes** are configured for the cluster_){{% /show-in %}} or all of the data has been rejected. Data that has not been rejected is ingested and queryable. + + The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points. + +Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. + +To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request. + +### Review HTTP status codes + +InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. +The `message` property of the response body may contain additional details about the error. 
+{{< product-name >}} returns one of the following HTTP status codes for a write request:
+
+{{% show-in "clustered,cloud-dedicated" %}}
+| HTTP response code | Response body | Description |
+| :-------------------------------| :--------------------------------------------------------------- | :------------- |
+| `204 "No Content"` | Empty | InfluxDB ingested all of the data in the batch |
+| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | Some or all request data isn't allowed (for example, is malformed or falls outside of the database's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
+| `401 "Unauthorized"` | Empty | The `Authorization` request header is missing or malformed or the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the database |
+| `404 "Not found"` | A requested **resource type** (for example, "database"), and **resource name** | A requested resource wasn't found |
+| `422 "Unprocessable Entity"` | `message` contains details about the error | The data isn't allowed (for example, falls outside of the database's retention period). |
+| `500 "Internal server error"` | Empty | Default status for an error |
+| `503 "Service unavailable"` | Empty | The server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. 
|
+{{% /show-in %}}
+
+{{% show-in "cloud-serverless" %}}
+| HTTP response code | Response body | Description |
+| :-------------------------------| :--------------------------------------------------------------- | :------------- |
+| `204 "No Content"` | Empty | InfluxDB ingested all of the data in the batch |
+| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | Some or all request data isn't allowed (for example, is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
+| `401 "Unauthorized"` | Empty | The `Authorization` request header is missing or malformed or the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the bucket |
+| `404 "Not found"` | A requested **resource type** (for example, "organization" or "bucket"), and **resource name** | A requested resource wasn't found |
+| `413 "Request too large"` | cannot read data: points in batch is too large | The request exceeds the maximum [global limit](/influxdb3/cloud-serverless/admin/billing/limits/) |
+| `422 "Unprocessable Entity"` | `message` contains details about the error | The data isn't allowed (for example, falls outside of the bucket's retention period). |
+| `429 "Too many requests"` | Empty | The number of requests exceeds the [adjustable service quota](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas). The `Retry-After` header contains the number of seconds to wait before trying the write again. |
+| `500 "Internal server error"` | Empty | Default status for an error |
+| `503 "Service unavailable"` | Empty | The server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. 
| +{{% /show-in %}} + +The `message` property of the response body may contain additional details about the error. +If your data did not write to the {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}}{{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}}, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). + +## Troubleshoot failures + +If you notice data is missing in your database, do the following: + +- Check the [HTTP status code](#review-http-status-codes) in the response. +- Check the `message` property in the response body for details about the error. +- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). +- Verify all lines contain valid syntax ([line protocol](/influxdb3/version/reference/syntax/line-protocol/)). +- Verify the timestamps in your data match the [precision parameter](/influxdb3/version/reference/glossary/#precision) in your request. +- Minimize payload size and network errors by [optimizing writes](/influxdb3/version/write-data/best-practices/optimize-writes/). + +## Troubleshoot rejected points + +When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. +If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: + +- `code`: `"invalid"` +- `line`: the line number of the _first_ rejected point in the batch. +- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. Line numbers are 1-based. 
+ +InfluxDB rejects points for the following reasons: + +- a line protocol parsing error +- an invalid timestamp +- a schema conflict + +Schema conflicts occur when you try to write data that contains any of the following: + +- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}} {{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}} data and contains a different data type for an existing field +- a tag and a field that use the same key + +### Example + +The following example shows a response body for a write request that contains two rejected points: + +```json +{ + "code": "invalid", + "line": 2, + "message": "failed to parse line protocol: + errors encountered on line(s): + error parsing line 2 (1-based): Invalid measurement was provided + error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'" +} +``` + +Check for [field data type](/influxdb3/version/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition (default partitioning +is by measurement and day)--for example, did you attempt to write `string` data to an `int` field? + +## Report write issues + +If you experience persistent write issues that you can't resolve using the troubleshooting steps above, use these guidelines to gather the necessary information when reporting the issue to InfluxData support. + +> [!Note] +> #### Before reporting an issue +> +> Ensure you have followed all [troubleshooting steps](#troubleshoot-failures) and +> reviewed the [write optimization guidelines](/influxdb3/version/write-data/best-practices/optimize-writes/) +> to rule out common configuration and data formatting issues. 
+ +### Gather essential information + +When reporting write issues, provide the following information to help InfluxData engineers diagnose the problem: + +#### 1. Error details and logs + +**Capture the complete error response:** + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```bash +# Example: Capture both successful and failed write attempts +curl --silent --show-error --write-out "\nHTTP Status: %{http_code}\nResponse Time: %{time_total}s\n" \ + --request POST \ + "https://{{< influxdb/host >}}/write?db=DATABASE_NAME&precision=ns" \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --data-binary @problematic-data.lp \ + > write-error-response.txt 2>&1 +``` +{{% /code-placeholders %}} + +**Log client-side errors:** + +If using a client library, enable debug logging and capture the full exception details: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Python](#) +[Go](#) +[Java](#) +[JavaScript](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```python +import logging +from influxdb_client_3 import InfluxDBClient3 + +# Enable debug logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger("influxdb_client_3") + +try: + client = InfluxDBClient3(token="AUTH_TOKEN", host="{{< influxdb/host >}}", database="DATABASE_NAME") + client.write(data) +except Exception as e: + logger.error(f"Write failed: {str(e)}") + # Include full stack trace in your report + import traceback + traceback.print_exc() +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```go +package main + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/InfluxCommunity/influxdb3-go" +) + +func main() { + // Enable debug logging + client, err := influxdb3.New(influxdb3.ClientConfig{ + Host: "https://{{< influxdb/host >}}", + Token: "AUTH_TOKEN", + Database: "DATABASE_NAME", + Debug: true, + }) + + if err != nil { + log.Fatal(err) + } + defer 
client.Close() + + err = client.Write(context.Background(), data) + if err != nil { + // Log the full error details + fmt.Fprintf(os.Stderr, "Write error: %+v\n", err) + } +} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```java +import com.influxdb.v3.client.InfluxDBClient; +import java.util.logging.Logger; +import java.util.logging.Level; + +public class WriteErrorExample { + private static final Logger logger = Logger.getLogger(WriteErrorExample.class.getName()); + + public static void main(String[] args) { + try (InfluxDBClient client = InfluxDBClient.getInstance( + "https://{{< influxdb/host >}}", + "AUTH_TOKEN".toCharArray(), + "DATABASE_NAME")) { + + client.writeRecord(data); + } catch (Exception e) { + logger.log(Level.SEVERE, "Write failed", e); + // Include full stack trace in your report + e.printStackTrace(); + } + } +} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```javascript +import { InfluxDBClient } from '@influxdata/influxdb3-client' + +const client = new InfluxDBClient({ + host: 'https://{{< influxdb/host >}}', + token: 'AUTH_TOKEN', + database: 'DATABASE_NAME' +}) + +try { + await client.write(data) +} catch (error) { + console.error('Write failed:', error) + // Include the full error object in your report + console.error('Full error details:', JSON.stringify(error, null, 2)) +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} +{{% /code-placeholders %}} + +Replace the following in your code: + +{{% hide-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}} +{{% show-in "clustered,cloud-dedicated" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with 
_write_ access to the specified database.{{% /show-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/cloud-serverless/admin/tokens/) with _write_ access to the specified bucket.{{% /show-in %}} +{{% show-in "enterprise,core" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}} with write permissions on the specified database{{% /show-in %}} + +#### 2. Data samples and patterns + +**Provide representative data samples:** + +- Include 10-20 lines of the problematic line protocol data (sanitized if necessary) +- Show both successful and failing data formats +- Include timestamp ranges and precision used +- Specify if the issue occurs with specific measurements, tags, or field types + +**Example data documentation:** +``` +# Successful writes: +measurement1,tag1=value1,tag2=value2 field1=1.23,field2="text" 1640995200000000000 + +# Failing writes: +measurement1,tag1=value1,tag2=value2 field1="string",field2=456 1640995260000000000 +# Error: field data type conflict - field1 changed from float to string +``` + +#### 3. Write patterns and volume + +Document your write patterns: + +- **Frequency**: How often do you write data? (for example, every 10 seconds, once per minute) +- **Batch size**: How many points per write request? +- **Concurrency**: How many concurrent write operations? +- **Data retention**: How long is data retained? +- **Timing**: When did the issue first occur? Is it intermittent or consistent? + +#### 4. 
Environment details + +{{% show-in "clustered" %}} +**Cluster configuration:** +- InfluxDB Clustered version +- Kubernetes environment details +- Node specifications (CPU, memory, storage) +- Network configuration between client and cluster +{{% /show-in %}} + +**Client configuration:** +- Client library version and language +- Connection settings (timeouts, retry logic) +- Geographic location relative to cluster + +#### 5. Reproduction steps + +Provide step-by-step instructions to reproduce the issue: + +1. **Environment setup**: How to configure a similar environment +2. **Data preparation**: Sample data files or generation scripts +3. **Write commands**: Exact commands or code used +4. **Expected vs actual results**: What should happen vs what actually happens + +### Create a support package + +Organize all gathered information into a comprehensive package: + +**Files to include:** +- `write-error-response.txt` - HTTP response details +- `client-logs.txt` - Client library debug logs +- `sample-data.lp` - Representative line protocol data (sanitized) +- `reproduction-steps.md` - Detailed reproduction guide +- `environment-details.md` - {{% show-in "clustered" %}}Cluster and{{% /show-in %}} client configuration +- `write-patterns.md` - Usage patterns and volume information + +**Package format:** +```bash +# Create a timestamped support package +TIMESTAMP=$(date -Iseconds) +mkdir "write-issue-${TIMESTAMP}" +# Add all relevant files to the directory +tar -czf "write-issue-${TIMESTAMP}.tar.gz" "write-issue-${TIMESTAMP}/" +``` + +### Submit the issue + +Include the support package when contacting InfluxData support through your standard [support channels](#bug-reports-and-feedback), along with: + +- A clear description of the problem +- Impact assessment (how critical is this issue?) 
+- Any workarounds you've attempted +- Business context if the issue affects production systems + +This comprehensive information will help InfluxData engineers identify root causes and provide targeted solutions for your write issues. From c362792f3f45dfdc03f871f3aa5847ea239ab25b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 11 Aug 2025 15:55:58 -0500 Subject: [PATCH 10/13] feat(influxdb3): add 'partial writes' tag to distributed troubleshooting pages Add 'partial writes' tag to frontmatter for better content discoverability: - content/influxdb3/cloud-dedicated/write-data/troubleshoot.md - content/influxdb3/cloud-serverless/write-data/troubleshoot.md - content/influxdb3/clustered/write-data/troubleshoot.md This helps users find information about partial write scenarios across InfluxDB 3 distributed editions. --- content/influxdb3/cloud-dedicated/write-data/troubleshoot.md | 2 +- content/influxdb3/cloud-serverless/write-data/troubleshoot.md | 2 +- content/influxdb3/clustered/write-data/troubleshoot.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md index 1b22a235c..5c835639a 100644 --- a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md +++ b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md @@ -10,7 +10,7 @@ menu: influxdb3_cloud_dedicated: name: Troubleshoot issues parent: Write data -influxdb3/cloud-dedicated/tags: [write, line protocol, errors] +influxdb3/cloud-dedicated/tags: [write, line protocol, errors, partial writes] related: - /influxdb3/cloud-dedicated/get-started/write/ - /influxdb3/cloud-dedicated/reference/syntax/line-protocol/ diff --git a/content/influxdb3/cloud-serverless/write-data/troubleshoot.md b/content/influxdb3/cloud-serverless/write-data/troubleshoot.md index 36bbc2e40..2408d031c 100644 --- a/content/influxdb3/cloud-serverless/write-data/troubleshoot.md +++ 
b/content/influxdb3/cloud-serverless/write-data/troubleshoot.md @@ -10,7 +10,7 @@ menu: influxdb3_cloud_serverless: name: Troubleshoot issues parent: Write data -influxdb3/cloud-serverless/tags: [write, line protocol, errors] +influxdb3/cloud-serverless/tags: [write, line protocol, errors, partial writes] related: - /influxdb3/cloud-serverless/get-started/write/ - /influxdb3/cloud-serverless/reference/syntax/line-protocol/ diff --git a/content/influxdb3/clustered/write-data/troubleshoot.md b/content/influxdb3/clustered/write-data/troubleshoot.md index 8457208a5..8520ee59e 100644 --- a/content/influxdb3/clustered/write-data/troubleshoot.md +++ b/content/influxdb3/clustered/write-data/troubleshoot.md @@ -11,7 +11,7 @@ menu: influxdb3_clustered: name: Troubleshoot issues parent: Write data -influxdb3/clustered/tags: [write, line protocol, errors] +influxdb3/clustered/tags: [write, line protocol, errors, partial writes] related: - /influxdb3/clustered/get-started/write/ - /influxdb3/clustered/reference/syntax/line-protocol/ From 81e86e3db2d282f6ca1fb65213233676c231b73f Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 09:32:51 -0500 Subject: [PATCH 11/13] Apply suggestions from code review Co-authored-by: Scott Anderson --- .../troubleshoot-distributed.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/content/shared/influxdb3-write-guides/troubleshoot-distributed.md b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md index 6bceb7734..802d518fd 100644 --- a/content/shared/influxdb3-write-guides/troubleshoot-distributed.md +++ b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md @@ -127,8 +127,7 @@ When reporting write issues, provide the following information to help InfluxDat **Capture the complete error response:** -{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} -```bash +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } # Example: Capture both successful and failed write 
attempts curl --silent --show-error --write-out "\nHTTP Status: %{http_code}\nResponse Time: %{time_total}s\n" \ --request POST \ @@ -138,13 +137,11 @@ curl --silent --show-error --write-out "\nHTTP Status: %{http_code}\nResponse Ti --data-binary @problematic-data.lp \ > write-error-response.txt 2>&1 ``` -{{% /code-placeholders %}} **Log client-side errors:** If using a client library, enable debug logging and capture the full exception details: -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} {{< code-tabs-wrapper >}} {{% code-tabs %}} [Python](#) @@ -153,7 +150,7 @@ If using a client library, enable debug logging and capture the full exception d [JavaScript](#) {{% /code-tabs %}} {{% code-tab-content %}} -```python +```python { placeholders="DATABASE_NAME|AUTH_TOKEN" } import logging from influxdb_client_3 import InfluxDBClient3 @@ -172,7 +169,7 @@ except Exception as e: ``` {{% /code-tab-content %}} {{% code-tab-content %}} -```go +```go { placeholders="DATABASE_NAME|AUTH_TOKEN" } package main import ( @@ -207,7 +204,7 @@ func main() { ``` {{% /code-tab-content %}} {{% code-tab-content %}} -```java +```java { placeholders="DATABASE_NAME|AUTH_TOKEN" } import com.influxdb.v3.client.InfluxDBClient; import java.util.logging.Logger; import java.util.logging.Level; @@ -232,7 +229,7 @@ public class WriteErrorExample { ``` {{% /code-tab-content %}} {{% code-tab-content %}} -```javascript +```javascript { placeholders="DATABASE_NAME|AUTH_TOKEN" } import { InfluxDBClient } from '@influxdata/influxdb3-client' const client = new InfluxDBClient({ @@ -251,7 +248,6 @@ try { ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -{{% /code-placeholders %}} Replace the following in your code: From a7e8fe8625942cadca6bd7e444250f165a062577 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 16:09:08 -0500 Subject: [PATCH 12/13] fix(influxdb3): address Reid's review feedback on query timeout best practices MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Address reidkaufmann's review comments on query timeout documentation: Technical accuracy fixes: - Replace "node/nodes" terminology with "querier/queriers" throughout - Remove AI hallucination: "query result caching" (doesn't exist in IOx) - Replace with proper cache recommendations for Enterprise/Core users - Remove vague "background maintenance" references - Add specific compaction performance variation cause - Fix "round-robin or load-balanced" → "round-robin query routing" Content improvements: - Enhance checkout line analogy with explanation of noisy neighbors - Add experimental query guidance for reducing impact on other users - Replace unactionable solutions with customer-controllable actions - Restore missing flexibility recommendations and link to the code sample Technical updates: - Update code blocks to use new placeholder syntax - Add related links between cache documentation and timeout strategies - Fix numbering consistency in Solutions sections --- .../query-timeout-best-practices.md | 9 ++- .../query-timeout-best-practices.md | 8 ++- .../query-timeout-best-practices.md | 9 ++- .../query-timeout-best-practices.md | 59 ++++++++++++------- 4 files changed, 61 insertions(+), 24 deletions(-) diff --git a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md index b2608f4a4..fb4a1c875 100644 --- a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md +++ b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -6,5 +6,12 @@ menu: name: Query timeout best practices parent: Troubleshoot and optimize queries weight: 205 +related: + - /influxdb3/cloud-dedicated/reference/client-libraries/v3/ + - /influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli/ 
source: shared/influxdb3-query-guides/query-timeout-best-practices.md ---- \ No newline at end of file +--- + +