package run

import (
	"fmt"
	"net"

	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
	"github.com/influxdb/influxdb/services/admin"
	"github.com/influxdb/influxdb/services/collectd"
	"github.com/influxdb/influxdb/services/graphite"
	"github.com/influxdb/influxdb/services/httpd"
	"github.com/influxdb/influxdb/services/opentsdb"
	"github.com/influxdb/influxdb/tsdb"
)

// Server represents a container for the metadata and storage data and services.
// It is built using a Config and it manages the startup and shutdown of all
// services in the proper order.
type Server struct {
	MetaStore     *meta.Store
	TSDBStore     *tsdb.Store
	QueryExecutor *tsdb.QueryExecutor
	PointsWriter  tsdb.PointsWriter

	Services []Service
}

// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, joinURLs string) *Server {
	// Construct base meta store and data store.
	s := &Server{
		MetaStore: meta.NewStore(c.Meta.Dir),
		TSDBStore: tsdb.NewStore(c.Data.Dir),
	}

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendAdminService(c.Admin)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	s.appendOpenTSDBService(c.OpenTSDB)
	for _, g := range c.Graphites {
		s.appendGraphiteService(g)
	}

	return s
}
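
// A minimal usage sketch (illustrative only): build a Server from a Config,
// open it, and make sure it is closed on shutdown. The config value c and the
// empty join-URL string are assumptions for the example, not taken from this
// file.
//
//	s := NewServer(c, "")
//	if err := s.Open(); err != nil {
//		log.Fatalf("failed to open server: %s", err)
//	}
//	defer s.Close()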

func (s *Server) appendClusterService(c cluster.Config) {
	srv := cluster.NewService(c)
	s.Services = append(s.Services, srv)
}

func (s *Server) appendAdminService(c admin.Config) {
	srv := admin.NewService(c)
	s.Services = append(s.Services, srv)
}

func (s *Server) appendHTTPDService(c httpd.Config) {
	srv := httpd.NewService(c)
	s.Services = append(s.Services, srv)
}

func (s *Server) appendCollectdService(c collectd.Config) {
	srv := collectd.NewService(c)
	s.Services = append(s.Services, srv)
}

func (s *Server) appendOpenTSDBService(c opentsdb.Config) {
	srv := opentsdb.NewService(c)
	s.Services = append(s.Services, srv)
}

func (s *Server) appendGraphiteService(c graphite.Config) {
	srv := graphite.NewService(c)
	s.Services = append(s.Services, srv)
}

// Open opens the meta and data store and all services.
func (s *Server) Open() error {
	if err := func() error {
		// Open meta store.
		if err := s.MetaStore.Open(); err != nil {
			return fmt.Errorf("open meta store: %s", err)
		}

		// Open TSDB store.
		if err := s.TSDBStore.Open(); err != nil {
			return fmt.Errorf("open tsdb store: %s", err)
		}

		for _, service := range s.Services {
			if err := service.Open(); err != nil {
				return fmt.Errorf("open service: %s", err)
			}
		}

		return nil
	}(); err != nil {
		s.Close()
		return err
	}

	return nil
}

// Close shuts down the meta and data stores and all services.
func (s *Server) Close() error {
	if s.MetaStore != nil {
		s.MetaStore.Close()
	}
	if s.TSDBStore != nil {
		s.TSDBStore.Close()
	}
	for _, service := range s.Services {
		service.Close()
	}
	return nil
}

// Service represents a service attached to the server.
type Service interface {
	Open() error
	Close() error
	Addr() net.Addr
}
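
// A minimal sketch of a type satisfying Service (hypothetical; none of the
// bundled services necessarily look like this). It shows the lifecycle the
// server expects: Open starts the listener, Close tears it down, and Addr
// reports where the service is bound.
//
//	type echoService struct {
//		ln net.Listener
//	}
//
//	func (e *echoService) Open() error {
//		ln, err := net.Listen("tcp", ":0")
//		if err != nil {
//			return err
//		}
//		e.ln = ln
//		return nil
//	}
//
//	func (e *echoService) Close() error   { return e.ln.Close() }
//	func (e *echoService) Addr() net.Addr { return e.ln.Addr() }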

/*
type Node struct {
	Server *influxdb.Server

	hostname string

	adminServer     *admin.Server
	GraphiteServers []graphite.Server // The Graphite Servers
	OpenTSDBServer  *opentsdb.Server  // The OpenTSDB Server
}

func (s *Node) ClusterAddr() net.Addr {
	return s.clusterListener.Addr()
}

func (s *Node) ClusterURL() *url.URL {
	// Find out which port the cluster started on
	_, p, e := net.SplitHostPort(s.ClusterAddr().String())
	if e != nil {
		panic(e)
	}

	h := net.JoinHostPort(s.hostname, p)
	return &url.URL{
		Scheme: "http",
		Host:   h,
	}
}

func (s *Node) Close() error {
	if err := s.closeClusterListener(); err != nil {
		return err
	}

	if err := s.closeAPIListener(); err != nil {
		return err
	}

	if err := s.closeAdminServer(); err != nil {
		return err
	}

	for _, g := range s.GraphiteServers {
		if err := g.Close(); err != nil {
			return err
		}
	}

	if s.OpenTSDBServer != nil {
		if err := s.OpenTSDBServer.Close(); err != nil {
			return err
		}
	}

	if s.DataNode != nil {
		if err := s.DataNode.Close(); err != nil {
			return err
		}
	}

	if s.raftLog != nil {
		if err := s.raftLog.Close(); err != nil {
			return err
		}
	}

	if s.Broker != nil {
		if err := s.Broker.Close(); err != nil {
			return err
		}
	}

	return nil
}

func (s *Node) openListener(desc, addr string, h http.Handler) (net.Listener, error) {
	var err error
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	go func() {
		err := http.Serve(listener, h)

		// The listener was closed so exit
		// See https://github.com/golang/go/issues/4373
		if strings.Contains(err.Error(), "closed") {
			return
		}
		if err != nil {
			log.Fatalf("%s server failed to serve on %s: %s", desc, addr, err)
		}
	}()
	return listener, nil
}

func (s *Node) openAPIListener(addr string, h http.Handler) error {
	var err error
	s.apiListener, err = s.openListener("API", addr, h)
	if err != nil {
		return err
	}
	return nil
}

func (s *Node) closeAPIListener() error {
	var err error
	if s.apiListener != nil {
		err = s.apiListener.Close()
		s.apiListener = nil
	}
	return err
}

func (s *Node) openClusterListener(addr string, h http.Handler) error {
	var err error
	s.clusterListener, err = s.openListener("Cluster", addr, h)
	if err != nil {
		return err
	}
	return nil
}

func (s *Node) closeClusterListener() error {
	var err error
	if s.clusterListener != nil {
		err = s.clusterListener.Close()
		s.clusterListener = nil
	}
	return err
}

func (cmd *RunCommand) Open(config *Config, join string) *Node {
	if config != nil {
		cmd.config = config
	}

	log.Printf("influxdb started, version %s, commit %s", version, commit)

	// Parse join urls from the --join flag.
	joinURLs := parseURLs(join)

	// Start the broker handler.
	h := &Handler{Config: config}
	if err := cmd.node.openClusterListener(cmd.config.ClusterAddr(), h); err != nil {
		log.Fatalf("Cluster server failed to listen on %s. %s ", cmd.config.ClusterAddr(), err)
	}
	log.Printf("Cluster server listening on %s", cmd.node.ClusterAddr().String())

	// Open broker & raft log, initialize or join as necessary.
	if cmd.config.Broker.Enabled {
		cmd.openBroker(joinURLs, h)
		// If we're running as a broker locally, always connect to it since it must
		// be ready before we can start the data node.
		joinURLs = []url.URL{*cmd.node.ClusterURL()}
	}

	var s *influxdb.Server
	// Open server, initialize or join as necessary.
	if cmd.config.Data.Enabled {

		//FIXME: Need to also pass in dataURLs to bootstrap a data node
		s = cmd.openServer(joinURLs)
		cmd.node.DataNode = s
		s.SetAuthenticationEnabled(cmd.config.Authentication.Enabled)
		log.Printf("authentication enabled: %v\n", cmd.config.Authentication.Enabled)

		// Enable retention policy enforcement if requested.
		if cmd.config.Data.RetentionCheckEnabled {
			interval := time.Duration(cmd.config.Data.RetentionCheckPeriod)
			if err := s.StartRetentionPolicyEnforcement(interval); err != nil {
				log.Fatalf("retention policy enforcement failed: %s", err.Error())
			}
			log.Printf("broker enforcing retention policies with check interval of %s", interval)
		}

		// Start shard group pre-create
		interval := cmd.config.ShardGroupPreCreateCheckPeriod()
		if err := s.StartShardGroupsPreCreate(interval); err != nil {
			log.Fatalf("shard group pre-create failed: %s", err.Error())
		}
		log.Printf("shard group pre-create with check interval of %s", interval)
	}

	// Start the server handler. Attach to broker if listening on the same port.
	if s != nil {
		h.Server = s
		// if config.Snapshot.Enabled {
		// 	log.Printf("snapshot server listening on %s", cmd.config.ClusterAddr())
		// } else {
		// 	log.Printf("snapshot server disabled")
		// }

		// Spin up the collectd server
		if cmd.config.Collectd.Enabled {
			c := cmd.config.Collectd
			cs := collectd.NewServer(s, c.TypesDB)
			cs.Database = c.Database
			err := collectd.ListenAndServe(cs, c.ConnectionString(cmd.config.BindAddress))
			if err != nil {
				log.Printf("failed to start collectd Server: %v\n", err.Error())
			}
		}

		// Spin up any Graphite servers
		for _, graphiteConfig := range cmd.config.Graphites {
			if !graphiteConfig.Enabled {
				continue
			}

			// Configure Graphite parsing.
			parser := graphite.NewParser()
			parser.Separator = graphiteConfig.NameSeparatorString()
			parser.LastEnabled = graphiteConfig.LastEnabled()

			if err := s.CreateDatabaseIfNotExists(graphiteConfig.DatabaseString()); err != nil {
				log.Fatalf("failed to create database for %s Graphite server: %s", graphiteConfig.Protocol, err.Error())
			}

			// Spin up the server.
			var g graphite.Server
			g, err := graphite.NewServer(graphiteConfig.Protocol, parser, s, graphiteConfig.DatabaseString())
			if err != nil {
				log.Fatalf("failed to initialize %s Graphite server: %s", graphiteConfig.Protocol, err.Error())
			}

			err = g.ListenAndServe(graphiteConfig.ConnectionString())
			if err != nil {
				log.Fatalf("failed to start %s Graphite server: %s", graphiteConfig.Protocol, err.Error())
			}
			cmd.node.GraphiteServers = append(cmd.node.GraphiteServers, g)
		}

		// Spin up any OpenTSDB servers
		if config.OpenTSDB.Enabled {
			o := config.OpenTSDB
			db := o.DatabaseString()
			laddr := o.ListenAddress()
			policy := o.RetentionPolicy

			if err := s.CreateDatabaseIfNotExists(db); err != nil {
				log.Fatalf("failed to create database for OpenTSDB server: %s", err.Error())
			}

			if policy != "" {
				// Ensure retention policy exists.
				rp := influxdb.NewRetentionPolicy(policy)
				if err := s.CreateRetentionPolicyIfNotExists(db, rp); err != nil {
					log.Fatalf("failed to create retention policy for OpenTSDB: %s", err.Error())
				}
			}

			os := opentsdb.NewServer(s, policy, db)

			log.Println("Starting OpenTSDB service on", laddr)
			go os.ListenAndServe(laddr)
			cmd.node.OpenTSDBServer = os
		}

		// Start up self-monitoring if enabled.
		if cmd.config.Monitoring.Enabled {
			database := "_influxdb"
			policy := "default"
			interval := time.Duration(cmd.config.Monitoring.WriteInterval)

			// Ensure database exists.
			if err := s.CreateDatabaseIfNotExists(database); err != nil {
				log.Fatalf("failed to create database %s for internal monitoring: %s", database, err.Error())
			}

			// Ensure retention policy exists.
			rp := influxdb.NewRetentionPolicy(policy)
			if err := s.CreateRetentionPolicyIfNotExists(database, rp); err != nil {
				log.Fatalf("failed to create retention policy for internal monitoring: %s", err.Error())
			}

			s.StartSelfMonitoring(database, policy, interval)
			log.Printf("started self-monitoring at interval of %s", interval)
		}
	}

	// Unless disabled, start the loop to report anonymous usage stats every 24h.
	if cmd.config.ReportingEnabled {
		if cmd.config.Broker.Enabled && cmd.config.Data.Enabled {
			// Make sure we have a config object before we try to use it.
			if clusterID := cmd.node.Broker.Broker.ClusterID(); clusterID != 0 {
				go s.StartReportingLoop(clusterID)
			}
		} else {
			log.Fatalln("failed to start reporting because not running as a broker and a data node")
		}
	}

	if cmd.node.Broker != nil {
		// Have it occasionally tell a data node in the cluster to run continuous queries.
		if cmd.config.ContinuousQuery.Disabled {
			log.Printf("Not running continuous queries. [continuous_queries].disabled is set to true.")
		} else {
			cmd.node.Broker.RunContinuousQueryLoop()
		}
	}

	if cmd.config.APIAddr() != cmd.config.ClusterAddr() {
		err := cmd.node.openAPIListener(cmd.config.APIAddr(), h)
		if err != nil {
			log.Fatalf("API server failed to listen on %s. %s ", cmd.config.APIAddr(), err)
		}
	}
	log.Printf("API server listening on %s", cmd.config.APIAddr())

	return cmd.node
}

func (cmd *RunCommand) Close() {
	cmd.node.Close()
}

// creates and initializes a broker.
func (cmd *RunCommand) openBroker(brokerURLs []url.URL, h *Handler) {
	path := cmd.config.BrokerDir()
	u := cmd.node.ClusterURL()
	raftTracing := cmd.config.Logging.RaftTracing

	// Create broker.
	b := influxdb.NewBroker()
	b.TruncationInterval = time.Duration(cmd.config.Broker.TruncationInterval)
	b.MaxTopicSize = cmd.config.Broker.MaxTopicSize
	b.MaxSegmentSize = cmd.config.Broker.MaxSegmentSize
	cmd.node.Broker = b

	// Create raft log.
	l := raft.NewLog()
	l.SetURL(*u)
	l.DebugEnabled = raftTracing
	b.Log = l
	cmd.node.raftLog = l

	// Create Raft clock.
	clk := raft.NewClock()
	clk.ApplyInterval = time.Duration(cmd.config.Raft.ApplyInterval)
	clk.ElectionTimeout = time.Duration(cmd.config.Raft.ElectionTimeout)
	clk.HeartbeatInterval = time.Duration(cmd.config.Raft.HeartbeatInterval)
	clk.ReconnectTimeout = time.Duration(cmd.config.Raft.ReconnectTimeout)
	l.Clock = clk

	// Open broker so it can feed last index data to the log.
	if err := b.Open(path); err != nil {
		log.Fatalf("failed to open broker at %s : %s", path, err)
	}
	log.Printf("broker opened at %s", path)

	// Attach the broker as the finite state machine of the raft log.
	l.FSM = &messaging.RaftFSM{Broker: b}

	// Open raft log inside broker directory.
	if err := l.Open(filepath.Join(path, "raft")); err != nil {
		log.Fatalf("raft: %s", err)
	}

	// Attach broker and log to handler.
	h.Broker = b
	h.Log = l

	// Checks to see if the raft index is 0. If it's 0, it might be the first
	// node in the cluster and must initialize or join.
	index, _ := l.LastLogIndexTerm()
	if index == 0 {
		// If we have join URLs, then attempt to join the cluster.
		if len(brokerURLs) > 0 {
			joinLog(l, brokerURLs)
			return
		}

		if err := l.Initialize(); err != nil {
			log.Fatalf("initialize raft log: %s", err)
		}

		u := b.Broker.URL()
		log.Printf("initialized broker: %s\n", (&u).String())
	} else {
		log.Printf("broker already member of cluster. Using existing state and ignoring join URLs")
	}
}

// joins a raft log to an existing cluster.
func joinLog(l *raft.Log, brokerURLs []url.URL) {
	// Attempts to join each server until successful.
	for _, u := range brokerURLs {
		if err := l.Join(u); err == raft.ErrInitialized {
			return
		} else if err != nil {
			log.Printf("join: failed to connect to raft cluster: %s: %s", (&u).String(), err)
		} else {
			log.Printf("join: connected raft log to %s", (&u).String())
			return
		}
	}
	log.Fatalf("join: failed to connect raft log to any specified server")
}

// creates and initializes a server.
func (cmd *RunCommand) openServer(joinURLs []url.URL) *influxdb.Server {

	// Create messaging client to the brokers.
	c := influxdb.NewMessagingClient(*cmd.node.ClusterURL())
	c.SetURLs(joinURLs)

	if err := c.Open(filepath.Join(cmd.config.Data.Dir, messagingClientFile)); err != nil {
		log.Fatalf("messaging client error: %s", err)
	}

	// If no URLs exist on the client then return an error since we cannot reach a broker.
	if len(c.URLs()) == 0 {
		log.Fatal("messaging client has no broker URLs")
	}

	// Create and open the server.
	s := influxdb.NewServer()

	s.WriteTrace = cmd.config.Logging.WriteTracing
	s.RetentionAutoCreate = cmd.config.Data.RetentionAutoCreate
	s.RecomputePreviousN = cmd.config.ContinuousQuery.RecomputePreviousN
	s.RecomputeNoOlderThan = time.Duration(cmd.config.ContinuousQuery.RecomputeNoOlderThan)
	s.ComputeRunsPerInterval = cmd.config.ContinuousQuery.ComputeRunsPerInterval
	s.ComputeNoMoreThan = time.Duration(cmd.config.ContinuousQuery.ComputeNoMoreThan)
	s.Version = version
	s.CommitHash = commit

	// Open server with data directory and broker client.
	if err := s.Open(cmd.config.Data.Dir, c); err != nil {
		log.Fatalf("failed to open data node: %v", err.Error())
	}
	log.Printf("data node(%d) opened at %s", s.ID(), cmd.config.Data.Dir)

	// Give brokers time to elect a leader if entire cluster is being restarted.
	time.Sleep(1 * time.Second)

	if s.ID() == 0 {
		joinOrInitializeServer(s, *cmd.node.ClusterURL(), joinURLs)
	} else {
		log.Printf("data node already member of cluster. Using existing state and ignoring join URLs")
	}

	return s
}

// joinOrInitializeServer joins a new server to an existing cluster or initializes it as the first
// member of the cluster
func joinOrInitializeServer(s *influxdb.Server, u url.URL, joinURLs []url.URL) {
	// Create data node on an existing data node.
	for _, joinURL := range joinURLs {
		if err := s.Join(&u, &joinURL); err == influxdb.ErrDataNodeNotFound {
			// No data nodes could be found to join. We're the first.
			if err := s.Initialize(u); err != nil {
				log.Fatalf("server initialization error(1): %s", err)
			}
			log.Printf("initialized data node: %s\n", (&u).String())
			return
		} else if err != nil {
			// does not return so that the next joinURL can be tried
			log.Printf("join: failed to connect data node: %s: %s", (&u).String(), err)
		} else {
			log.Printf("join: connected data node to %s", u)
			return
		}
	}

	if len(joinURLs) == 0 {
		if err := s.Initialize(u); err != nil {
			log.Fatalf("server initialization error(2): %s", err)
		}
		log.Printf("initialized data node: %s\n", (&u).String())
		return
	}

	log.Fatalf("join: failed to connect data node to any specified server")
}

// parses a comma-delimited list of URLs.
func parseURLs(s string) (a []url.URL) {
	if s == "" {
		return nil
	}

	for _, s := range strings.Split(s, ",") {
		u, err := url.Parse(s)
		if err != nil {
			log.Fatalf("cannot parse urls: %s", err)
		}
		a = append(a, *u)
	}
	return
}

// returns true if the file exists.
func fileExists(path string) bool {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false
	}
	return true
}
*/