modify the configuration to match this example https://github.com/influxdb/influxdb/pull/293#issuecomment-37389463
parent 7e957ec680
commit 1043101d00
@@ -23,11 +23,13 @@ port = 8086 # binding is disabled if the port isn't set
 # ssl-port = 8084 # SSL support is enabled if you set a port and cert
 # ssl-cert = /path/to/cert.pem
 
-[graphite]
-# optionally enable a graphite (carbon) compatible ingestion
-enabled = false
-port = 2003
-database = "" # store graphite data in this database
+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = false
+# port = 2003
+# database = "" # store graphite data in this database
 
 # Raft configuration
 [raft]
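The sample config keeps the plugin off with the port and database commented out; a minimal sketch of the same block with ingestion switched on (values illustrative, not part of this commit) would be:

[input_plugins]

# Configure the graphite api
[input_plugins.graphite]
enabled = true
port = 2003            # carbon plaintext protocol port
database = "graphite"  # store graphite data in this database
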
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
 # If hostname (on the OS) doesn't return a name that can be resolved by the other
 # systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resolved here.
 # hostname = ""
 
@@ -20,6 +20,14 @@ assets = "./admin"
 ssl-port = 8087 # SSL support is enabled if you set a port and cert
 ssl-cert = "../cert.pem"
 
+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = false
+port = 2003
+database = "" # store graphite data in this database
+
 # Raft configuration
 [raft]
 # The raft port should be open between all servers in a cluster.
@@ -76,12 +84,12 @@ lru-cache-size = "200m"
 # files. max-open-files is per shard so this * that will be max.
 # max-open-shards = 0
 
 # These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
 # can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
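The capital-letter convention described in that comment is simple to state in code; here is an illustrative sketch of the rule (this is not the project's actual routing code):

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// longTerm applies the rule from the config comment: series whose names
// start with an upper-case letter go to long term storage.
func longTerm(series string) bool {
	r, _ := utf8.DecodeRuneInString(series)
	return unicode.IsUpper(r)
}

func main() {
	fmt.Println(longTerm("Exceptions")) // true: long term
	fmt.Println(longTerm("exceptions")) // false: short term
}
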
@@ -96,7 +104,7 @@ lru-cache-size = "200m"
 # over the network when doing a query.
 duration = "7d"
 
 # split will determine how many shards to split each duration into. For example,
 # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
 # would be created that have the data for 2014-02-10. By default, data will
 # be split into those two shards deterministically by hashing the (database, series)
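Deterministic splitting means the shard chosen for a point depends only on its (database, series) pair; an illustrative sketch of such a scheme (the actual hash InfluxDB uses may differ):

package main

import (
	"fmt"
	"hash/fnv"
)

// pickShard deterministically maps (database, series) onto one of
// `split` shards within a duration bucket.
func pickShard(database, series string, split uint32) uint32 {
	h := fnv.New32a()
	h.Write([]byte(database))
	h.Write([]byte{0}) // separator so ("a","bc") and ("ab","c") differ
	h.Write([]byte(series))
	return h.Sum32() % split
}

func main() {
	// With split = 2, every write for this series lands on the same shard.
	fmt.Println(pickShard("mydb", "cpu.load", 2))
}
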
@@ -93,10 +93,10 @@ type LoggingConfig struct {
 }
 
 type LevelDbConfiguration struct {
	MaxOpenFiles   int  `toml:"max-open-files"`
	LruCacheSize   size `toml:"lru-cache-size"`
	MaxOpenShards  int  `toml:"max-open-shards"`
	PointBatchSize int  `toml:"point-batch-size"`
 }
 
 type ShardingDefinition struct {
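The lowercase `size` type is defined elsewhere in configuration.go so that values like "200m" can appear in the TOML; one plausible sketch of such a type, assuming the decoder honors encoding.TextUnmarshaler (BurntSushi/toml does):

package configuration

import (
	"strconv"
	"strings"
)

// size is a byte count that accepts suffixed values such as "200m".
// This sketch is an assumption; the real parsing lives in this file.
type size int

func (s *size) UnmarshalText(text []byte) error {
	str := string(text)
	mult := 1
	switch {
	case strings.HasSuffix(str, "m"):
		mult, str = 1024*1024, strings.TrimSuffix(str, "m")
	case strings.HasSuffix(str, "k"):
		mult, str = 1024, strings.TrimSuffix(str, "k")
	}
	n, err := strconv.Atoi(str)
	if err != nil {
		return err
	}
	*s = size(n * mult)
	return nil
}
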
@@ -160,19 +160,23 @@ type WalConfig struct {
	RequestsPerLogFile int `toml:"requests-per-log-file"`
 }
 
+type InputPlugins struct {
+	Graphite GraphiteConfig `toml:"graphite"`
+}
+
 type TomlConfiguration struct {
	Admin        AdminConfig
-	Api          ApiConfig
-	Graphite     GraphiteConfig
+	HttpApi      ApiConfig    `toml:"api"`
+	InputPlugins InputPlugins `toml:"input_plugins"`
	Raft         RaftConfig
	Storage      StorageConfig
	Cluster      ClusterConfig
	Logging      LoggingConfig
	LevelDb      LevelDbConfiguration
	Hostname     string
	BindAddress  string             `toml:"bind-address"`
	Sharding     ShardingDefinition `toml:"sharding"`
	WalConfig    WalConfig          `toml:"wal"`
 }
 
 type Configuration struct {
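To see the new tags at work, a self-contained sketch that decodes the nested layout (GraphiteConfig's fields are inferred from the parse code and tests below; the BurntSushi/toml import is an assumption based on the tag syntax):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type GraphiteConfig struct {
	Enabled  bool
	Port     int
	Database string
}

type InputPlugins struct {
	Graphite GraphiteConfig `toml:"graphite"`
}

type TomlConfiguration struct {
	InputPlugins InputPlugins `toml:"input_plugins"`
}

func main() {
	doc := `
[input_plugins]
[input_plugins.graphite]
enabled = true
port = 2003
database = "graphite_db"
`
	var c TomlConfiguration
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.InputPlugins.Graphite.Port) // 2003
}
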
@@ -198,7 +202,7 @@ type Configuration struct {
	LevelDbMaxOpenFiles   int
	LevelDbLruCacheSize   int
	LevelDbMaxOpenShards  int
	LevelDbPointBatchSize int
	ShortTermShard        *ShardConfiguration
	LongTermShard         *ShardConfiguration
	ReplicationFactor     int
@@ -256,12 +260,12 @@ func parseTomlConfiguration(filename string) (*Configuration, error) {
	config := &Configuration{
		AdminHttpPort:    tomlConfiguration.Admin.Port,
		AdminAssetsDir:   tomlConfiguration.Admin.Assets,
-		ApiHttpPort:      tomlConfiguration.Api.Port,
-		ApiHttpCertPath:  tomlConfiguration.Api.SslCertPath,
-		ApiHttpSslPort:   tomlConfiguration.Api.SslPort,
-		GraphiteEnabled:  tomlConfiguration.Graphite.Enabled,
-		GraphitePort:     tomlConfiguration.Graphite.Port,
-		GraphiteDatabase: tomlConfiguration.Graphite.Database,
+		ApiHttpPort:      tomlConfiguration.HttpApi.Port,
+		ApiHttpCertPath:  tomlConfiguration.HttpApi.SslCertPath,
+		ApiHttpSslPort:   tomlConfiguration.HttpApi.SslPort,
+		GraphiteEnabled:  tomlConfiguration.InputPlugins.Graphite.Enabled,
+		GraphitePort:     tomlConfiguration.InputPlugins.Graphite.Port,
+		GraphiteDatabase: tomlConfiguration.InputPlugins.Graphite.Database,
		RaftServerPort:   tomlConfiguration.Raft.Port,
		RaftDir:          tomlConfiguration.Raft.Dir,
		ProtobufPort:     tomlConfiguration.Cluster.ProtobufPort,
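Note that only the TOML-facing structs change shape here: the flat Configuration struct keeps its GraphiteEnabled, GraphitePort and GraphiteDatabase fields, so everything downstream of the parser (including the test below) reads the config exactly as before.
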
@@ -34,6 +34,10 @@ func (self *LoadConfigurationSuite) TestConfig(c *C) {
	c.Assert(config.ApiHttpCertPath, Equals, "../cert.pem")
	c.Assert(config.ApiHttpPortString(), Equals, "")
 
+	c.Assert(config.GraphiteEnabled, Equals, false)
+	c.Assert(config.GraphitePort, Equals, 2003)
+	c.Assert(config.GraphiteDatabase, Equals, "")
+
	c.Assert(config.RaftDir, Equals, "/tmp/influxdb/development/raft")
	c.Assert(config.RaftServerPort, Equals, 8090)
 
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
 # If hostname (on the OS) doesn't return a name that can be resolved by the other
 # systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resolved here.
 # hostname = ""
 
@@ -21,6 +21,14 @@ port = 60500
 ssl-port = 60503
 ssl-cert = "./cert.pem"
 
+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = true
+port = 60513
+database = "graphite_db" # store graphite data in this database
+
 # Raft configuration
 [raft]
 # The raft port should be open between all servers in a cluster.
@@ -62,12 +70,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts getting dropped.
 query-shard-buffer-size = 500
 
 # These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
 # can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -82,7 +90,7 @@ query-shard-buffer-size = 500
 # over the network when doing a query.
 duration = "1h"
 
 # split will determine how many shards to split each duration into. For example,
 # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
 # would be created that have the data for 2014-02-10. By default, data will
 # be split into those two shards deterministically by hashing the (database, series)
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
 # If hostname (on the OS) doesn't return a name that can be resolved by the other
 # systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resolved here.
 # hostname = ""
 
@@ -60,12 +60,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts getting dropped.
 query-shard-buffer-size = 500
 
 # These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
 # can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
 # over the network when doing a query.
 duration = "1h"
 
 # split will determine how many shards to split each duration into. For example,
 # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
 # would be created that have the data for 2014-02-10. By default, data will
 # be split into those two shards deterministically by hashing the (database, series)
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
 # If hostname (on the OS) doesn't return a name that can be resolved by the other
 # systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resolved here.
 # hostname = ""
 
@@ -60,12 +60,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts getting dropped.
 query-shard-buffer-size = 500
 
 # These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
 # can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
 # over the network when doing a query.
 duration = "1h"
 
 # split will determine how many shards to split each duration into. For example,
 # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
 # would be created that have the data for 2014-02-10. By default, data will
 # be split into those two shards deterministically by hashing the (database, series)