2015-06-09 17:27:27 +00:00
|
|
|
### Welcome to the InfluxDB configuration file.
|
|
|
|
|
2015-06-11 05:38:49 +00:00
|
|
|
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
|
|
|
|
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
|
|
|
|
# We don't track ip addresses of servers reporting. This is only used
|
|
|
|
# to track the number of instances running and the versions, which
|
|
|
|
# is very helpful for us.
|
|
|
|
# Change this option to true to disable reporting.
|
|
|
|
reporting-disabled = false
|
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [meta]
|
|
|
|
###
|
|
|
|
### Controls the parameters for the Raft consensus group that stores metadata
|
|
|
|
### about the InfluxDB cluster.
|
|
|
|
###
|
2013-12-11 18:10:37 +00:00
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[meta]
|
|
|
|
dir = "/var/opt/influxdb/meta"
|
|
|
|
hostname = "localhost"
|
|
|
|
bind-address = ":8088"
|
|
|
|
retention-autocreate = true
|
|
|
|
election-timeout = "1s"
|
|
|
|
heartbeat-timeout = "1s"
|
|
|
|
leader-lease-timeout = "500ms"
|
|
|
|
commit-timeout = "50ms"
|
2015-06-05 20:40:18 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [data]
|
|
|
|
###
|
2015-07-02 14:21:30 +00:00
|
|
|
### Controls where the actual shard data for InfluxDB lives and how it is
|
|
|
|
### flushed from the WAL. "dir" may need to be changed to a suitable place
|
|
|
|
### for your system, but the WAL settings are an advanced configuration. The
|
|
|
|
### defaults should work for most systems.
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[data]
|
|
|
|
dir = "/var/opt/influxdb/data"
|
2015-08-18 20:59:54 +00:00
|
|
|
|
2015-10-06 22:30:11 +00:00
|
|
|
# Controls the engine type for new shards. Options are b1, bz1, or tsm1.
|
|
|
|
# b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
|
|
|
|
# tsm1 is the 0.9.5 engine.
|
2015-09-17 02:05:20 +00:00
|
|
|
# engine = "bz1"
|
|
|
|
|
2015-08-18 20:59:54 +00:00
|
|
|
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
|
|
|
|
# apply to any new shards created after upgrading to a version > 0.9.3.
|
2015-07-11 16:36:00 +00:00
|
|
|
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
|
2015-07-15 20:59:07 +00:00
|
|
|
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
|
|
|
|
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
|
2015-06-09 17:27:27 +00:00
|
|
|
|
2015-08-18 20:59:54 +00:00
|
|
|
# These are the WAL settings for the storage engine >= 0.9.3
|
|
|
|
wal-dir = "/var/opt/influxdb/wal"
|
|
|
|
wal-enable-logging = true
|
|
|
|
|
|
|
|
# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
|
|
|
|
# flush to the index
|
|
|
|
# wal-ready-series-size = 25600
|
|
|
|
|
|
|
|
# Flush and compact a partition once this ratio of series are over the ready size
|
|
|
|
# wal-compaction-threshold = 0.6
|
|
|
|
|
|
|
|
# Force a flush and compaction if any series in a partition gets above this size in bytes
|
|
|
|
# wal-max-series-size = 2097152
|
|
|
|
|
|
|
|
# Force a flush of all series and full compaction if there have been no writes in this
|
|
|
|
# amount of time. This is useful for ensuring that shards that are cold for writes don't
|
|
|
|
# keep a bunch of data cached in memory and in the WAL.
|
|
|
|
# wal-flush-cold-interval = "10m"
|
|
|
|
|
|
|
|
# Force a partition to flush its largest series if it reaches this approximate size in
|
|
|
|
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
|
|
|
|
# The more memory you have, the bigger this can be.
|
|
|
|
# wal-partition-size-threshold = 20971520
|
|
|
|
|
2015-09-17 02:26:23 +00:00
|
|
|
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
|
|
|
|
# log any sensitive data contained within a query.
|
2015-09-17 02:27:49 +00:00
|
|
|
# query-log-enabled = true
|
2015-09-17 02:26:23 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [cluster]
|
|
|
|
###
|
|
|
|
### Controls non-Raft cluster behavior, which generally includes how data is
|
|
|
|
### shared across shards.
|
|
|
|
###
|
2014-05-29 22:22:21 +00:00
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[cluster]
|
2015-09-30 23:56:23 +00:00
|
|
|
shard-writer-timeout = "10s" # The time within which a shard must respond to write.
|
2015-07-02 16:34:29 +00:00
|
|
|
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
|
2015-04-09 18:37:13 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [retention]
|
|
|
|
###
|
|
|
|
### Controls the enforcement of retention policies for evicting old data.
|
|
|
|
###
|
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[retention]
|
|
|
|
enabled = true
|
2015-09-05 06:30:13 +00:00
|
|
|
check-interval = "30m"
|
2015-06-09 17:27:27 +00:00
|
|
|
|
2015-09-01 03:28:24 +00:00
|
|
|
###
|
2015-09-02 23:14:03 +00:00
|
|
|
### Controls the system self-monitoring, statistics and diagnostics.
|
2015-09-01 03:28:24 +00:00
|
|
|
###
|
2015-09-18 22:31:33 +00:00
|
|
|
### The internal database for monitoring data is created automatically if
|
2015-09-18 22:30:02 +00:00
|
|
|
### it does not already exist. The target retention within this database
|
|
|
|
### is called 'monitor' and is also created with a retention period of 7 days
|
|
|
|
### and a replication factor of 1, if it does not exist. In all cases the
|
|
|
|
### retention policy is configured as the default for the database.
|
2015-09-01 03:28:24 +00:00
|
|
|
|
|
|
|
[monitor]
|
2015-09-02 23:14:03 +00:00
|
|
|
store-enabled = true # Whether to record statistics internally.
|
2015-09-01 03:28:24 +00:00
|
|
|
store-database = "_internal" # The destination database for recorded statistics
|
2015-09-09 03:37:15 +00:00
|
|
|
store-interval = "10s" # The interval at which to record statistics
|
2015-09-01 03:28:24 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [admin]
|
|
|
|
###
|
2015-07-22 23:49:12 +00:00
|
|
|
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
|
|
|
|
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
2015-01-02 21:41:02 +00:00
|
|
|
|
2013-12-11 18:10:37 +00:00
|
|
|
[admin]
|
2015-06-08 21:44:52 +00:00
|
|
|
enabled = true
|
|
|
|
bind-address = ":8083"
|
2015-07-22 23:49:12 +00:00
|
|
|
https-enabled = false
|
|
|
|
https-certificate = "/etc/ssl/influxdb.pem"
|
2013-12-11 18:10:37 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [http]
|
|
|
|
###
|
|
|
|
### Controls how the HTTP endpoints are configured. These are the primary
|
|
|
|
### mechanism for getting data into and out of InfluxDB.
|
|
|
|
###
|
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[http]
|
|
|
|
enabled = true
|
|
|
|
bind-address = ":8086"
|
|
|
|
auth-enabled = false
|
|
|
|
log-enabled = true
|
|
|
|
write-tracing = false
|
|
|
|
pprof-enabled = false
|
2015-07-17 23:54:06 +00:00
|
|
|
https-enabled = false
|
|
|
|
https-certificate = "/etc/ssl/influxdb.pem"
|
2014-03-24 15:33:28 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [[graphite]]
|
|
|
|
###
|
|
|
|
### Controls one or many listeners for Graphite data.
|
|
|
|
###
|
|
|
|
|
2015-06-09 10:38:07 +00:00
|
|
|
[[graphite]]
|
|
|
|
enabled = false
|
2015-09-25 01:50:24 +00:00
|
|
|
# database = "graphite"
|
2015-06-10 23:40:08 +00:00
|
|
|
# bind-address = ":2003"
|
|
|
|
# protocol = "tcp"
|
|
|
|
# consistency-level = "one"
|
|
|
|
# name-separator = "."
|
2015-06-12 03:48:24 +00:00
|
|
|
|
2015-07-22 00:48:52 +00:00
|
|
|
# These next lines control how batching works. You should have this enabled
|
2015-09-07 02:16:00 +00:00
|
|
|
# otherwise you could get dropped metrics or poor performance. Batching
|
2015-07-22 00:48:52 +00:00
|
|
|
# will buffer points in memory if you have many coming in.
|
|
|
|
|
|
|
|
# batch-size = 1000 # will flush if this many points get buffered
|
2015-09-08 22:18:14 +00:00
|
|
|
# batch-pending = 5 # number of batches that may be pending in memory
|
2015-07-22 00:48:52 +00:00
|
|
|
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
|
|
|
2015-06-12 03:48:24 +00:00
|
|
|
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
|
|
|
|
## separated by `name-separator`.
|
|
|
|
## The "measurement" tag is special and the corresponding field will become
|
|
|
|
## the name of the metric.
|
|
|
|
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
|
|
|
|
## {
|
|
|
|
## measurement: "cpu",
|
|
|
|
## tags: {
|
|
|
|
## "type": "server",
|
|
|
|
##       "host": "localhost",
|
|
|
|
## "device": "cpu0"
|
|
|
|
## }
|
|
|
|
## }
|
|
|
|
# name-schema = "type.host.measurement.device"
|
|
|
|
|
|
|
|
## If set to true, when the input metric name has more fields than `name-schema` specified,
|
|
|
|
## the extra fields will be ignored.
|
|
|
|
## Otherwise an error will be logged and the metric rejected.
|
|
|
|
# ignore-unnamed = true
|
2015-06-09 10:38:07 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [collectd]
|
|
|
|
###
|
|
|
|
### Controls the listener for collectd data.
|
|
|
|
###
|
|
|
|
|
2015-01-30 00:36:27 +00:00
|
|
|
[collectd]
|
2015-06-08 21:44:52 +00:00
|
|
|
enabled = false
|
2015-06-09 10:38:07 +00:00
|
|
|
# bind-address = ""
|
|
|
|
# database = ""
|
|
|
|
# typesdb = ""
|
2014-09-30 21:20:09 +00:00
|
|
|
|
2015-07-22 00:48:52 +00:00
|
|
|
# These next lines control how batching works. You should have this enabled
|
2015-09-07 02:16:00 +00:00
|
|
|
# otherwise you could get dropped metrics or poor performance. Batching
|
2015-07-22 00:48:52 +00:00
|
|
|
# will buffer points in memory if you have many coming in.
|
|
|
|
|
|
|
|
# batch-size = 1000 # will flush if this many points get buffered
|
2015-09-08 22:18:14 +00:00
|
|
|
# batch-pending = 5 # number of batches that may be pending in memory
|
2015-07-22 00:48:52 +00:00
|
|
|
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [opentsdb]
|
|
|
|
###
|
|
|
|
### Controls the listener for OpenTSDB data.
|
|
|
|
###
|
|
|
|
|
2015-03-29 16:26:03 +00:00
|
|
|
[opentsdb]
|
2015-06-08 21:44:52 +00:00
|
|
|
enabled = false
|
2015-09-25 21:04:37 +00:00
|
|
|
# bind-address = ":4242"
|
|
|
|
# database = "opentsdb"
|
2015-06-09 10:38:07 +00:00
|
|
|
# retention-policy = ""
|
2015-09-25 21:04:37 +00:00
|
|
|
# consistency-level = "one"
|
|
|
|
# tls-enabled = false
|
|
|
|
# certificate = ""
|
2015-03-29 16:26:03 +00:00
|
|
|
|
2015-09-08 23:19:50 +00:00
|
|
|
# These next lines control how batching works. You should have this enabled
|
|
|
|
# otherwise you could get dropped metrics or poor performance. Only points
|
|
|
|
# received over the telnet protocol undergo batching.
|
|
|
|
|
|
|
|
# batch-size = 1000 # will flush if this many points get buffered
|
2015-09-09 02:35:19 +00:00
|
|
|
# batch-pending = 5 # number of batches that may be pending in memory
|
2015-09-08 23:19:50 +00:00
|
|
|
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
2015-08-10 23:21:27 +00:00
|
|
|
### [[udp]]
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
2015-08-10 23:21:27 +00:00
|
|
|
### Controls the listeners for InfluxDB line protocol data via UDP.
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
|
2015-08-10 23:21:27 +00:00
|
|
|
[[udp]]
|
2015-06-08 21:44:52 +00:00
|
|
|
enabled = false
|
2015-06-09 10:38:07 +00:00
|
|
|
# bind-address = ""
|
|
|
|
# database = ""
|
2015-09-28 22:16:38 +00:00
|
|
|
# retention-policy = ""
|
2015-07-22 00:48:52 +00:00
|
|
|
|
|
|
|
# These next lines control how batching works. You should have this enabled
|
2015-09-07 02:16:00 +00:00
|
|
|
# otherwise you could get dropped metrics or poor performance. Batching
|
2015-07-22 00:48:52 +00:00
|
|
|
# will buffer points in memory if you have many coming in.
|
|
|
|
|
|
|
|
# batch-size = 1000 # will flush if this many points get buffered
|
2015-09-08 22:18:14 +00:00
|
|
|
# batch-pending = 5 # number of batches that may be pending in memory
|
2015-07-22 00:48:52 +00:00
|
|
|
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
2015-04-23 22:03:28 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [continuous_queries]
|
|
|
|
###
|
|
|
|
### Controls how continuous queries are run within InfluxDB.
|
|
|
|
###
|
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[continuous_queries]
|
2015-07-22 01:02:04 +00:00
|
|
|
log-enabled = true
|
2015-06-08 21:44:52 +00:00
|
|
|
enabled = true
|
|
|
|
recompute-previous-n = 2
|
2015-06-09 10:38:07 +00:00
|
|
|
recompute-no-older-than = "10m"
|
2015-06-08 21:44:52 +00:00
|
|
|
compute-runs-per-interval = 10
|
2015-06-09 10:38:07 +00:00
|
|
|
compute-no-more-than = "2m"
|
2015-06-08 21:44:52 +00:00
|
|
|
|
2015-06-09 17:27:27 +00:00
|
|
|
###
|
|
|
|
### [hinted-handoff]
|
|
|
|
###
|
|
|
|
### Controls the hinted handoff feature, which allows nodes to temporarily
|
|
|
|
### store queued data when one node of a cluster is down for a short period
|
|
|
|
### of time.
|
|
|
|
###
|
|
|
|
|
2015-06-08 21:44:52 +00:00
|
|
|
[hinted-handoff]
|
|
|
|
enabled = true
|
|
|
|
dir = "/var/opt/influxdb/hh"
|
|
|
|
max-size = 1073741824
|
2015-06-09 17:27:27 +00:00
|
|
|
max-age = "168h"
|
2015-06-08 21:44:52 +00:00
|
|
|
retry-rate-limit = 0
|
2015-10-03 06:30:00 +00:00
|
|
|
|
2015-10-01 19:20:35 +00:00
|
|
|
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
|
|
|
|
# If any error occurs, it will backoff in an exponential manner, until the interval
|
|
|
|
# reaches retry-max-interval. Once writes to all nodes are successfully completed the
|
|
|
|
# interval will reset to retry-interval.
|
2015-06-08 21:44:52 +00:00
|
|
|
retry-interval = "1s"
|
2015-10-01 19:20:35 +00:00
|
|
|
retry-max-interval = "1m"
|