# Welcome to the InfluxDB configuration file.

# If hostname (on the OS) doesn't return a name that can be resolved by the other
# systems in the cluster, you'll have to set the hostname here to an IP address
# or a name that can be resolved.
# hostname = ""
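# For example (a placeholder address, not a real default), a private IP that the
# other cluster members can reach:
# hostname = "10.0.1.5"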

bind-address = "0.0.0.0"

[logging]
# logging level can be one of "debug", "info", "warn" or "error"
level = "info"
file = "influxdb.log" # set to "stdout" to log to standard out

# Configure the admin server
[admin]
port = 8083   # binding is disabled if the port isn't set
assets = "./admin"

# Configure the http api
[api]
port = 8086   # binding is disabled if the port isn't set
# ssl-port = 8084   # SSL support is enabled if you set a port and cert
# ssl-cert = "/path/to/cert.pem"

# Connections will time out after this amount of time. This ensures that clients
# that misbehave and hold keep-alive connections they never use won't end up
# holding connections open forever. However, a request that legitimately takes
# longer than this to complete will be cut off.
read-timeout = "5s"

[input_plugins]

# Configure the graphite api
[input_plugins.graphite]
enabled = false
# port = 2003
# database = ""  # store graphite data in this database
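# For example, to accept graphite data into a (hypothetical) "graphite" database:
# enabled = true
# port = 2003
# database = "graphite"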

[input_plugins.udp]
enabled = false
# port = 4444
# database = ""

# Raft configuration
[raft]
# The raft port should be open between all servers in a cluster.
# However, this port shouldn't be accessible from the internet.
port = 8090
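# For example (assuming Linux and a 10.0.0.0/8 private network), you could limit
# the port to cluster traffic with firewall rules along these lines:
#   iptables -A INPUT -p tcp --dport 8090 -s 10.0.0.0/8 -j ACCEPT
#   iptables -A INPUT -p tcp --dport 8090 -j DROP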

# Where the raft logs are stored. The user running InfluxDB will need read/write access.
dir = "/tmp/influxdb/development/raft"

# election-timeout = "1s"

[storage]
dir = "/tmp/influxdb/development/db"
# How many requests to potentially buffer in memory. If the buffer gets filled, writes
# will still be logged and, once the local storage has caught up (or compacted), the
# writes will be replayed from the WAL.
write-buffer-size = 10000

[cluster]
# A comma separated list of servers to seed this server. This is only relevant
# when the server is joining a new cluster; otherwise the server will use the
# list of known servers from before it shut down. Any server can be pointed to
# as a seed; it will find the Raft leader automatically.

# Here's an example. Note that the port on the host is the same as the raft port.
# seed-servers = ["hosta:8090","hostb:8090"]

# Replication happens over a TCP connection with a Protobuf protocol.
# This port should be reachable between all servers in a cluster.
# However, this port shouldn't be accessible from the internet.
protobuf_port = 8099
protobuf_timeout = "2s"        # the write timeout on the protobuf conn; any duration parseable by time.ParseDuration
protobuf_heartbeat = "200ms"   # the heartbeat interval between the servers; must be parseable by time.ParseDuration
protobuf_min_backoff = "1s"    # the minimum backoff after a failed heartbeat attempt
protobuf_max_backoff = "10s"   # the maximum backoff after a failed heartbeat attempt
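# As a reference, Go's time.ParseDuration accepts values such as "500ms", "2s",
# "1m", and compound forms like "1m30s".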

# How many write requests to potentially buffer in memory per server. If the buffer
# gets filled, writes will still be logged and, once the server has caught up (or come
# back online), the writes will be replayed from the WAL.
write-buffer-size = 10000

# The maximum number of responses to buffer from remote nodes. If the expected
# number of responses exceeds this number, then querying will happen sequentially
# and the buffer size will be limited to this number.
max-response-buffer-size = 100

# When queries get distributed out to shards, they run in parallel. This means that
# results can get buffered in memory, since results arrive in any order but have to be
# processed in the correct time order. Setting this higher gives better performance,
# but you'll need more memory. Setting this to 1 ensures that you don't need to buffer
# in memory, but you won't get the best performance.
concurrent-shard-query-limit = 10

[leveldb]

# Maximum mmap open files; this will affect the virtual memory used by the process.
max-open-files = 40

# LRU cache size. The LRU cache is used by leveldb to store the contents of
# uncompressed sstables. You can use an `m` or `g` suffix for megabytes
# and gigabytes, respectively.
lru-cache-size = "200m"
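# For example, to give the cache a full gigabyte instead:
# lru-cache-size = "1g"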

# The default setting on this is 0, which means unlimited. Set this to something if you
# want to limit the max number of open files. max-open-files is per shard, so this * that
# will be the max.
max-open-shards = 0
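# For example (hypothetical values): max-open-shards = 100 with max-open-files = 40
# allows up to 100 * 40 = 4000 open files.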

# The default setting is 100. This option tells how many points will be fetched from
# LevelDb before they get flushed into the backend.
point-batch-size = 100

# The number of points to batch in memory before writing them to leveldb. Lowering this number will
# reduce the memory usage, but will result in slower writes.
write-batch-size = 5000000

# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter, like Exceptions, will be written
# into long term storage. Any series beginning with a lower case letter, like
# exceptions, will be written into short term. The idea is that you can write
# high precision data into short term and drop it after a couple of days.
# Meanwhile, continuous queries can run downsampling on the short term data
# and write into the long term area.
[sharding]
# How many servers in the cluster should have a copy of each shard.
# This gives you high availability and scalability on queries.
replication-factor = 1

[sharding.short-term]
# Each shard will cover this period of time. Note that it's best to have the
# group by time() intervals on all queries be < this setting. If they are,
# then the aggregate is calculated locally. Otherwise, all that data gets sent
# over the network when doing a query.
duration = "7d"

# split will determine how many shards to split each duration into. For example,
# if we created a shard for 2014-02-10 and split was set to 2, then two shards
# would be created that have the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, series)
# tuple. That means that data for a given series will be written to a single shard,
# making querying efficient. That can be overridden with the next option.
split = 1
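# Conceptually (a sketch, not necessarily the exact hash function used), the shard
# choice behaves like: shard_index = hash(database + series) % split, so every point
# of a given series lands on the same shard.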

# You can override the split behavior to have the data for series that match a
# given regex be randomly distributed across the shards for a given interval.
# You can use this if you have a hot spot for a given time series writing more
# data than a single server can handle. Most people won't have to resort to this
# option. Also note that using this option means that queries will have to send
# all data over the network so they won't be as efficient.
# split-random = "/^hf.*/"

[sharding.long-term]
duration = "30d"
split = 1
# split-random = "/^Hf.*/"

[wal]
dir = "/tmp/influxdb/development/wal"
flush-after = 1000    # the number of writes after which the wal will be flushed; 0 for flushing on every write
bookmark-after = 1000 # the number of writes after which a bookmark will be created

# The number of writes after which an index entry is created, pointing
# to the offset of the first request; defaults to 1k.
index-after = 1000

# The number of requests per log file. Once this many requests have been
# written, a new log file will be created.
requests-per-logfile = 10000