influxdb/cmd/influxd/config.go

package main

import (
	"fmt"
	"log"
	"net"
	"net/url"
	"os"
	"os/user"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/influxdb/influxdb/collectd"
	"github.com/influxdb/influxdb/graphite"
)

const (
	// DefaultPointBatchSize represents the number of points to batch together.
	DefaultPointBatchSize = 100

	// DefaultWriteBatchSize represents the number of bytes to batch together in a single write.
	DefaultWriteBatchSize = 10 * 1024 * 1024 // 10MB

	// DefaultConcurrentShardQueryLimit represents the number of shards that
	// can be queried concurrently at one time.
	DefaultConcurrentShardQueryLimit = 10

	// DefaultAPIReadTimeout represents the duration before an API request times out.
	DefaultAPIReadTimeout = 5 * time.Second

	// DefaultBrokerPort represents the default port the broker runs on.
	DefaultBrokerPort = 8086

	// DefaultDataPort represents the default port the data server runs on.
	DefaultDataPort = 8086

	// DefaultJoinURLs represents the default URLs for joining a cluster.
	DefaultJoinURLs = ""
)

// Config represents the configuration format for the influxd binary.
type Config struct {
	Hostname          string `toml:"hostname"`
	BindAddress       string `toml:"bind-address"`
	ReportingDisabled bool   `toml:"reporting-disabled"`
	Version           string `toml:"-"`
	InfluxDBVersion   string `toml:"-"`

	Initialization struct {
		JoinURLs string `toml:"join-urls"`
	} `toml:"initialization"`

	Authentication struct {
		Enabled bool `toml:"enabled"`
	} `toml:"authentication"`

	Admin struct {
		Enabled bool `toml:"enabled"`
		Port    int  `toml:"port"`
	} `toml:"admin"`

	HTTPAPI struct {
		Port        int      `toml:"port"`
		SSLPort     int      `toml:"ssl-port"`
		SSLCertPath string   `toml:"ssl-cert"`
		ReadTimeout Duration `toml:"read-timeout"`
	} `toml:"api"`

	Graphites []Graphite `toml:"graphite"`
	Collectd  Collectd   `toml:"collectd"`

	UDP struct {
		Enabled     bool   `toml:"enabled"`
		BindAddress string `toml:"bind-address"`
		Port        int    `toml:"port"`
	} `toml:"udp"`

	Broker struct {
		Port    int      `toml:"port"`
		Dir     string   `toml:"dir"`
		Timeout Duration `toml:"election-timeout"`
	} `toml:"broker"`

	Data struct {
		Dir                   string   `toml:"dir"`
		Port                  int      `toml:"port"`
		RetentionCheckEnabled bool     `toml:"retention-check-enabled"`
		RetentionCheckPeriod  Duration `toml:"retention-check-period"`
	} `toml:"data"`

	Cluster struct {
		Dir string `toml:"dir"`
	} `toml:"cluster"`

	Logging struct {
		File              string `toml:"file"`
		WriteTraceEnabled bool   `toml:"write-tracing"`
	} `toml:"logging"`

	ContinuousQuery struct {
		// When continuous queries run, previous intervals are automatically recomputed
		// in case lagged data came in. Set this to zero if you never have lagged data.
		// We do it this way because invalidating previously computed intervals would be
		// prohibitively hard and expensive.
		RecomputePreviousN int `toml:"recompute-previous-n"`

		// RecomputePreviousN provides guidance for how far back to recompute;
		// RecomputeNoOlderThan sets a ceiling on how far back in time it will go.
		// For example, with RecomputePreviousN set to 2 and this set to 10m, only the
		// previous two intervals are computed for any CQ with a group by time <= 5m.
		// For all others, only the previous window is recomputed.
		RecomputeNoOlderThan Duration `toml:"recompute-no-older-than"`

		// ComputeRunsPerInterval determines how many times the current and previous N
		// intervals will be computed. The group by time is divided by this value and the
		// interval is computed that many times:
		//   group by time seconds / runs per interval
		// This gives partial results for the current group by interval and determines how
		// long it takes for lagged data to be recomputed. For example, if this number is 10
		// and the group by time is 10m, it will be a minute past the previous 10m bucket of
		// time before lagged data is picked up.
		ComputeRunsPerInterval int `toml:"compute-runs-per-interval"`

		// ComputeNoMoreThan, paired with ComputeRunsPerInterval, determines the ceiling on
		// how many times smaller group by times will be computed. For example, with
		// ComputeRunsPerInterval set to 10 and this set to 1m, a group by time(1m) will only
		// get computed once per interval (and once per PreviousN). A group by time(5m) will
		// get five computes per interval. Any group by time window larger than 10m will get
		// computed 10 times for each interval.
		ComputeNoMoreThan Duration `toml:"compute-no-more-than"`

		// If this flag is set to true, both the brokers and data nodes should ignore any CQ processing.
		Disable bool `toml:"disable"`
	} `toml:"continuous_queries"`
}
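
// An illustrative sketch of a matching TOML file (hypothetical values that mirror
// the defaults set by NewConfig below; key names follow the toml tags above):
//
//	hostname = "localhost"
//	bind-address = "0.0.0.0"
//	reporting-disabled = false
//
//	[broker]
//	port = 8086
//	dir = "/home/user/.influxdb/broker"
//	election-timeout = "1s"
//
//	[data]
//	port = 8086
//	dir = "/home/user/.influxdb/data"
//	retention-check-enabled = true
//	retention-check-period = "10m"
//
//	[continuous_queries]
//	recompute-previous-n = 2
//	recompute-no-older-than = "10m"
//	compute-runs-per-interval = 10
//	compute-no-more-than = "2m"
//	disable = false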

// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
	u, _ := user.Current()

	c := &Config{}
	c.Broker.Dir = filepath.Join(u.HomeDir, ".influxdb/broker")
	c.Broker.Port = DefaultBrokerPort
	c.Broker.Timeout = Duration(1 * time.Second)
	c.Data.Dir = filepath.Join(u.HomeDir, ".influxdb/data")
	c.Data.Port = DefaultDataPort
	c.Data.RetentionCheckEnabled = true
	c.Data.RetentionCheckPeriod = Duration(10 * time.Minute)
	c.Admin.Enabled = true
	c.Admin.Port = 8083
	c.ContinuousQuery.RecomputePreviousN = 2
	c.ContinuousQuery.RecomputeNoOlderThan = Duration(10 * time.Minute)
	c.ContinuousQuery.ComputeRunsPerInterval = 10
	c.ContinuousQuery.ComputeNoMoreThan = Duration(2 * time.Minute)
	c.ContinuousQuery.Disable = false
	c.ReportingDisabled = false

	// Detect hostname (or set to localhost).
	if c.Hostname, _ = os.Hostname(); c.Hostname == "" {
		c.Hostname = "localhost"
	}

	// FIX(benbjohnson): Append where the udp servers are actually used.
	// config.UDPServers = append(config.UDPServers, UDPInputConfig{
	// 	Enabled:  tomlConfiguration.InputPlugins.UDPInput.Enabled,
	// 	Database: tomlConfiguration.InputPlugins.UDPInput.Database,
	// 	Port:     tomlConfiguration.InputPlugins.UDPInput.Port,
	// })

	return c
}

// DataAddr returns the TCP binding address for the data server.
func (c *Config) DataAddr() string {
	return net.JoinHostPort(c.BindAddress, strconv.Itoa(c.Data.Port))
}

// DataAddrUDP returns the UDP address for the series listener.
func (c *Config) DataAddrUDP() string {
	return net.JoinHostPort(c.UDP.BindAddress, strconv.Itoa(c.UDP.Port))
}

// DataURL returns the URL required to contact the data server.
func (c *Config) DataURL() *url.URL {
	return &url.URL{
		Scheme: "http",
		Host:   net.JoinHostPort(c.Hostname, strconv.Itoa(c.Data.Port)),
	}
}

// BrokerAddr returns the binding address for the Broker server.
func (c *Config) BrokerAddr() string {
	return fmt.Sprintf("%s:%d", c.BindAddress, c.Broker.Port)
}

// BrokerURL returns the URL required to contact the Broker server.
func (c *Config) BrokerURL() *url.URL {
	return &url.URL{
		Scheme: "http",
		Host:   net.JoinHostPort(c.Hostname, strconv.Itoa(c.Broker.Port)),
	}
}

// BrokerDir returns the broker directory to start up in and does home directory expansion if necessary.
func (c *Config) BrokerDir() string {
	p, e := filepath.Abs(c.Broker.Dir)
	if e != nil {
		log.Fatalf("Unable to get absolute path for Broker Directory: %q", c.Broker.Dir)
	}
	return p
}

// DataDir returns the data directory to start up in and does home directory expansion if necessary.
func (c *Config) DataDir() string {
	p, e := filepath.Abs(c.Data.Dir)
	if e != nil {
		log.Fatalf("Unable to get absolute path for Data Directory: %q", c.Data.Dir)
	}
	return p
}

// JoinURLs returns the join URLs from the initialization config, or the default if none are set.
func (c *Config) JoinURLs() string {
	if c.Initialization.JoinURLs == "" {
		return DefaultJoinURLs
	}
	return c.Initialization.JoinURLs
}

// Size represents a TOML parseable file size.
// Users can specify size using "m" for megabytes and "g" for gigabytes.
type Size int

// UnmarshalText parses a byte size from text.
func (s *Size) UnmarshalText(text []byte) error {
	// Parse numeric portion of value.
	length := len(text)
	size, err := strconv.ParseInt(string(text[:length-1]), 10, 64)
	if err != nil {
		return err
	}

	// Parse unit of measure ("m", "g", etc).
	switch suffix := text[len(text)-1]; suffix {
	case 'm':
		size *= 1 << 20 // MB
	case 'g':
		size *= 1 << 30 // GB
	default:
		return fmt.Errorf("unknown size suffix: %c", suffix)
	}

	// Check for overflow.
	if size > maxInt {
		return fmt.Errorf("size %d cannot be represented by an int", size)
	}

	*s = Size(size)
	return nil
}
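
// A minimal usage sketch (hypothetical, not part of this file):
//
//	var s Size
//	_ = s.UnmarshalText([]byte("512m")) // s == 512 << 20 (536870912 bytes)
//	_ = s.UnmarshalText([]byte("2g"))   // s == 2 << 30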

// Duration is a TOML wrapper type for time.Duration.
type Duration time.Duration

// UnmarshalText parses a TOML value into a duration value.
func (d *Duration) UnmarshalText(text []byte) error {
	// Ignore if there is no value set.
	if len(text) == 0 {
		return nil
	}

	// Otherwise parse as a duration formatted string.
	duration, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}

	// Set duration and return.
	*d = Duration(duration)
	return nil
}
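
// Values use the same syntax as time.ParseDuration, so TOML settings such as
// read-timeout = "5s" or retention-check-period = "10m" (hypothetical snippets)
// decode into Duration(5 * time.Second) and Duration(10 * time.Minute).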

// ParseConfigFile parses a configuration file at a given path.
func ParseConfigFile(path string) (*Config, error) {
	c := NewConfig()
	if _, err := toml.DecodeFile(path, &c); err != nil {
		return nil, err
	}
	return c, nil
}

// ParseConfig parses a configuration string into a config object.
func ParseConfig(s string) (*Config, error) {
	c := NewConfig()
	if _, err := toml.Decode(s, &c); err != nil {
		return nil, err
	}
	return c, nil
}
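
// A minimal usage sketch (hypothetical path, not part of this file); keys omitted
// from the file keep the defaults set by NewConfig:
//
//	c, err := ParseConfigFile("/etc/influxdb.conf")
//	if err != nil {
//		log.Fatalf("parse config: %s", err)
//	}
//	log.Printf("broker listening on %s", c.BrokerAddr())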

// Collectd represents the configuration for a collectd input.
type Collectd struct {
	Addr string `toml:"address"`
	Port uint16 `toml:"port"`

	Database string `toml:"database"`
	Enabled  bool   `toml:"enabled"`
	TypesDB  string `toml:"typesdb"`
}

// ConnectionString returns the connection string for this collectd config in the form host:port.
func (c *Collectd) ConnectionString(defaultBindAddr string) string {
	addr := c.Addr
	// If no address specified, use default.
	if addr == "" {
		addr = defaultBindAddr
	}

	port := c.Port
	// If no port specified, use default.
	if port == 0 {
		port = collectd.DefaultPort
	}

	return fmt.Sprintf("%s:%d", addr, port)
}
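
// A minimal usage sketch (hypothetical values, not part of this file): with no
// address or port configured, the connection string falls back to the supplied
// bind address and collectd.DefaultPort:
//
//	c := &Collectd{}
//	addr := c.ConnectionString("0.0.0.0") // "0.0.0.0:<collectd.DefaultPort>"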

// Graphite represents the configuration for a Graphite input.
type Graphite struct {
	Addr string `toml:"address"`
	Port uint16 `toml:"port"`

	Database string `toml:"database"`
	Enabled  bool   `toml:"enabled"`
	Protocol string `toml:"protocol"`

	NamePosition  string `toml:"name-position"`
	NameSeparator string `toml:"name-separator"`
}

// ConnectionString returns the connection string for this Graphite config in the form host:port.
func (g *Graphite) ConnectionString(defaultBindAddr string) string {
	addr := g.Addr
	// If no address specified, use default.
	if addr == "" {
		addr = defaultBindAddr
	}

	port := g.Port
	// If no port specified, use default.
	if port == 0 {
		port = graphite.DefaultGraphitePort
	}

	return fmt.Sprintf("%s:%d", addr, port)
}

// NameSeparatorString returns the character separating fields for Graphite data, or the default
// if no separator is set.
func (g *Graphite) NameSeparatorString() string {
	if g.NameSeparator == "" {
		return graphite.DefaultGraphiteNameSeparator
	}
	return g.NameSeparator
}

// LastEnabled returns whether the Graphite server should interpret the last field as "name".
func (g *Graphite) LastEnabled() bool {
	return strings.ToLower(g.NamePosition) == "last"
}

// maxInt is the largest integer representable by a word (architecture dependent).
const maxInt = int64(^uint(0) >> 1)