### Welcome to the InfluxDB configuration file.
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track IP addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false
# We'll try to get the hostname automatically, but if the OS returns something
# that isn't resolvable by other servers in the cluster, use this option to
# manually set the hostname
# hostname = "localhost"
###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###
[meta]
# Controls if this node should run the metaservice and participate in the Raft group
enabled = true
# Where the metadata/raft database is stored
dir = "/var/lib/influxdb/meta"
# The default address to bind to
bind-address = ":8088"
# The default address to bind the API to
http-bind-address = ":8091"
https-enabled = false
https-certificate = ""
retention-autocreate = true
# The default election timeout for the store
election-timeout = "1s"
# The default heartbeat timeout for the store
heartbeat-timeout = "1s"
# The default leader lease for the store
leader-lease-timeout = "500ms"
# The default commit timeout for the store
commit-timeout = "50ms"
# If trace log messages are printed for the meta service
cluster-tracing = false
# Whether a node should be automatically promoted to a raft node when needed
raft-promotion-enabled = true
# If log messages are printed for the meta service
logging-enabled = true
pprof-enabled = false
# The default duration for leases.
lease-duration = "1m0s"
###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###
[data]
# Controls if this node holds time series data shards in the cluster
enabled = true
dir = "/var/lib/influxdb/data"
# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/var/lib/influxdb/wal"
wal-logging-enabled = true
data-logging-enabled = true
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true
# Settings for the TSM engine
# CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
# cache-max-memory-size = 524288000
# CacheSnapshotMemorySize is the size at which the engine will
# snapshot the cache and write it to a TSM file, freeing up memory
# cache-snapshot-memory-size = 26214400
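# (For reference: 524288000 bytes is 500 MiB and 26214400 bytes is 25 MiB.)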
# CacheSnapshotWriteColdDuration is the length of time at
# which the engine will snapshot the cache and write it to
# a new TSM file if the shard hasn't received writes or deletes
# cache-snapshot-write-cold-duration = "1h"
# MinCompactionFileCount is the minimum number of TSM files
# that need to exist before a compaction cycle will run
# compact-min-file-count = 3
# CompactFullWriteColdDuration is the duration at which the engine
# will compact all TSM files in a shard if it hasn't received a
# write or delete
# compact-full-write-cold-duration = "24h"
# MaxPointsPerBlock is the maximum number of points in an encoded
# block in a TSM file. Larger numbers may yield better compression
# but could incur a performance penalty when querying
# max-points-per-block = 1000
###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###
[hinted-handoff]
enabled = true
dir = "/var/lib/influxdb/hh"
max-size = 1073741824 # maximum size in bytes (1073741824 bytes = 1 GiB)
max-age = "168h"
retry-rate-limit = 0
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will back off exponentially until the interval
# reaches retry-max-interval. Once writes to all nodes are successfully completed,
# the interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"
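# For example, with the settings above, failed retries back off roughly
# exponentially (1s, 2s, 4s, ...; the exact backoff factor is an implementation
# detail) until the interval is capped at 1m.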
# Interval between running checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons: 1) the data is older than the max age, or
# 2) the target node has been dropped from the cluster. In either case, data is never
# dropped until it has reached max-age.
purge-interval = "1h"
###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###
[cluster]
shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request.
write-timeout = "10s" # The time within which a write request must complete on the cluster.
###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
enabled = true
check-interval = "30m"
###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards whose start and end times are both in the future will ever be
### created; shards that would be wholly or partially in the past are never
### precreated.
[shard-precreation]
enabled = true
check-interval = "10m"
advance-period = "30m"
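# For example, with the settings above, the service checks every 10 minutes and
# creates any shard that is due to begin within the next 30 minutes.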
###
### [monitor]
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically if
### it does not already exist. The target retention policy within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases this
### retention policy is configured as the default for the database.
###
[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics
###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
[admin]
enabled = true
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
max-row-limit = 10000
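# For illustration: with the defaults above and an existing database named
# "mydb" (a hypothetical name), a line protocol point can be written through
# the HTTP API, e.g.
#   curl -XPOST "http://localhost:8086/write?db=mydb" --data-binary "cpu,host=server01 value=0.64"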
###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
enabled = false
# database = "graphite"
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# These next lines control how batching works. You should have this enabled,
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 5000 # will flush if this many points get buffered
# batch-pending = 10 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
2015-11-05 22:43:36 +00:00
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
### This string joins multiple matching 'measurement' values, providing more control over the final measurement name.
# separator = "."
### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from metric
# tags = ["region=us-east", "zone=1c"]
### Each template line requires a template pattern. It can have an optional
### filter before the template, separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas with no
### spaces, similar to the line protocol format. There can be only one default template.
# templates = [
#   "*.app env.service.resource.measurement",
#   # Default template
#   "server.*",
# ]
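### For illustration: with a hypothetical template "servers.* .host.measurement*"
### following the rules above, the metric "servers.localhost.cpu.loadavg.10" would
### be stored as measurement "cpu.loadavg.10" with the tag host=localhost.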
###
### [collectd]
###
### Controls the listener for collectd data.
###
[collectd]
enabled = false
# bind-address = ""
# database = ""
# typesdb = ""
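# A common types.db location is "/usr/share/collectd/types.db" (the exact path
# varies by distribution; verify it on your system).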
# These next lines control how batching works. You should have this enabled,
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###
[opentsdb]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate = ""
# log-point-errors = true # Log an error for every malformed point.
# These next lines control how batching works. You should have this enabled,
# otherwise you could get dropped metrics or poor performance. Only points
# received over the telnet protocol undergo batching.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
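# For illustration: with the listener enabled on the default ":4242", a point
# can be sent using the standard OpenTSDB telnet protocol, e.g.
#   put sys.cpu.user 1356998400 42.5 host=webserver01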
###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###
[[udp]]
enabled = false
# bind-address = ""
# database = "udp"
# retention-policy = ""
# These next lines control how batching works. You should have this enabled,
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# Sets the expected UDP payload size. Lower values tend to yield better performance; the default is the maximum UDP size, 65536.
# udp-payload-size = 65536
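# For illustration: assuming a listener bound to ":8089" (an arbitrary example
# port, not a default), a line protocol point could be sent with e.g.
#   echo "cpu,host=server01 value=0.64" | nc -u -w1 localhost 8089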
###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###
[continuous_queries]
log-enabled = true
enabled = true
# run-interval = "1s" # interval at which continuous queries are checked to see if they need to run
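# For illustration, a continuous query with custom resampling can be created
# with InfluxQL (the names here are hypothetical), e.g.:
#
#   CREATE CONTINUOUS QUERY mycq ON mydb RESAMPLE EVERY 1m FOR 1h BEGIN
#     SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(5m)
#   END
#
# RESAMPLE EVERY controls how often each interval is resampled, and FOR controls
# how long an interval participates in resampling; both default to the query's
# GROUP BY time interval.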