Merge pull request #5403 from influxdata/meta-service2

refactor meta into separate meta client & service

commit 58e0eed9cb (pull/5430/head, v0.10.0-beta2)
@@ -3,7 +3,7 @@ package cluster
import (
"math/rand"

"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
)

// Balancer represents a load-balancing algorithm for a set of nodes
@@ -5,7 +5,7 @@ import (
"testing"

"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
)

func NewNodes() []meta.NodeInfo {
@@ -11,8 +11,8 @@ import (
"time"

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@@ -88,11 +88,12 @@ type PointsWriter struct {
WriteTimeout time.Duration
Logger *log.Logger

MetaStore interface {
NodeID() uint64
Node *influxdb.Node

MetaClient interface {
Database(name string) (di *meta.DatabaseInfo, err error)
RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error)
CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo)
}

@@ -187,7 +188,7 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)
// holds the start time ranges for required shard groups
timeRanges := map[time.Time]*meta.ShardGroupInfo{}

rp, err := w.MetaStore.RetentionPolicy(wp.Database, wp.RetentionPolicy)
rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy)
if err != nil {
return nil, err
}

@@ -201,7 +202,7 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)

// holds all the shard groups and shards that are required for writes
for t := range timeRanges {
sg, err := w.MetaStore.CreateShardGroupIfNotExists(wp.Database, wp.RetentionPolicy, t)
sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, t)
if err != nil {
return nil, err
}

@@ -235,7 +236,7 @@ func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
w.statMap.Add(statPointWriteReq, int64(len(p.Points)))

if p.RetentionPolicy == "" {
db, err := w.MetaStore.Database(p.Database)
db, err := w.MetaClient.Database(p.Database)
if err != nil {
return err
} else if db == nil {

@@ -309,7 +310,7 @@ func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPo

for _, owner := range shard.Owners {
go func(shardID uint64, owner meta.ShardOwner, points []models.Point) {
if w.MetaStore.NodeID() == owner.NodeID {
if w.Node.ID == owner.NodeID {
w.statMap.Add(statPointWriteReqLocal, int64(len(points)))

err := w.TSDBStore.WriteToShard(shardID, points)
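Editorial note: the pattern above repeats throughout the PR — writers no longer hold a raft-backed `MetaStore`; they depend on a narrow `MetaClient` interface plus an `*influxdb.Node` for the local node identity. A minimal sketch of a test double that satisfies the `MetaClient` interface declared by `PointsWriter` follows. The interface methods are copied from the diff; the `fakeMetaClient` name and its fields are illustrative assumptions, not code from the repository.

```go
// Sketch only: an in-memory fake satisfying PointsWriter's MetaClient interface.
package cluster_test

import (
	"time"

	"github.com/influxdb/influxdb/services/meta"
)

// fakeMetaClient returns canned metadata so a PointsWriter can be exercised
// without a running meta service.
type fakeMetaClient struct {
	db *meta.DatabaseInfo
	rp *meta.RetentionPolicyInfo
	sg *meta.ShardGroupInfo
}

func (c *fakeMetaClient) Database(name string) (*meta.DatabaseInfo, error) {
	return c.db, nil
}

func (c *fakeMetaClient) RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) {
	return c.rp, nil
}

func (c *fakeMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
	return c.sg, nil
}

func (c *fakeMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) {
	return "db0", "rp0", c.sg
}
```

Usage would mirror the updated tests below: create the writer with `cluster.NewPointsWriter()`, assign the fake to `MetaClient`, and set `Node` to `&influxdb.Node{ID: 1}` so locality checks resolve against the local node ID.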
@@ -7,14 +7,15 @@ import (
"testing"
"time"

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
)

// Ensures the points writer maps a single point to a single shard.
func TestPointsWriter_MapShards_One(t *testing.T) {
ms := MetaStore{}
ms := MetaClient{}
rp := NewRetentionPolicy("myp", time.Hour, 3)

ms.NodeIDFn = func() uint64 { return 1 }

@@ -26,7 +27,7 @@ func TestPointsWriter_MapShards_One(t *testing.T) {
return &rp.ShardGroups[0], nil
}

c := cluster.PointsWriter{MetaStore: ms}
c := cluster.PointsWriter{MetaClient: ms}
pr := &cluster.WritePointsRequest{
Database: "mydb",
RetentionPolicy: "myrp",

@@ -49,7 +50,7 @@ func TestPointsWriter_MapShards_One(t *testing.T) {

// Ensures the points writer maps a multiple points across shard group boundaries.
func TestPointsWriter_MapShards_Multiple(t *testing.T) {
ms := MetaStore{}
ms := MetaClient{}
rp := NewRetentionPolicy("myp", time.Hour, 3)
AttachShardGroupInfo(rp, []meta.ShardOwner{
{NodeID: 1},

@@ -76,7 +77,7 @@ func TestPointsWriter_MapShards_Multiple(t *testing.T) {
panic("should not get here")
}

c := cluster.PointsWriter{MetaStore: ms}
c := cluster.PointsWriter{MetaClient: ms}
pr := &cluster.WritePointsRequest{
Database: "mydb",
RetentionPolicy: "myrp",

@@ -303,7 +304,7 @@ func TestPointsWriter_WritePoints(t *testing.T) {
},
}

ms := NewMetaStore()
ms := NewMetaClient()
ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) {
return nil, nil
}

@@ -316,11 +317,12 @@ func TestPointsWriter_WritePoints(t *testing.T) {
}

c := cluster.NewPointsWriter()
c.MetaStore = ms
c.MetaClient = ms
c.ShardWriter = sw
c.TSDBStore = store
c.HintedHandoff = hh
c.Subscriber = sub
c.Node = &influxdb.Node{ID: 1}

c.Open()
defer c.Close()

@@ -372,8 +374,8 @@ func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64
return f.CreateShardfn(database, retentionPolicy, shardID)
}

func NewMetaStore() *MetaStore {
ms := &MetaStore{}
func NewMetaClient() *MetaClient {
ms := &MetaClient{}
rp := NewRetentionPolicy("myp", time.Hour, 3)
AttachShardGroupInfo(rp, []meta.ShardOwner{
{NodeID: 1},

@@ -401,7 +403,7 @@ func NewMetaStore() *MetaStore {
return ms
}

type MetaStore struct {
type MetaClient struct {
NodeIDFn func() uint64
RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error)
CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)

@@ -409,21 +411,21 @@ type MetaStore struct {
ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo)
}

func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() }
func (m MetaClient) NodeID() uint64 { return m.NodeIDFn() }

func (m MetaStore) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {
func (m MetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {
return m.RetentionPolicyFn(database, name)
}

func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
func (m MetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp)
}

func (m MetaStore) Database(database string) (*meta.DatabaseInfo, error) {
func (m MetaClient) Database(database string) (*meta.DatabaseInfo, error) {
return m.DatabaseFn(database)
}

func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) {
func (m MetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) {
return m.ShardOwnerFn(shardID)
}
@@ -14,8 +14,8 @@ import (

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@@ -43,7 +43,7 @@ type Service struct {

Listener net.Listener

MetaStore interface {
MetaClient interface {
ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo)
}

@@ -198,7 +198,7 @@ func (s *Service) processWriteShardRequest(buf []byte) error {
if err == tsdb.ErrShardNotFound {

// Query the metastore for the owner of this shard
database, retentionPolicy, sgi := s.MetaStore.ShardOwner(req.ShardID())
database, retentionPolicy, sgi := s.MetaClient.ShardOwner(req.ShardID())
if sgi == nil {
// If we can't find it, then we need to drop this request
// as it is no longer valid. This could happen if writes were queued via
@@ -7,20 +7,20 @@ import (

"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tcp"
"github.com/influxdb/influxdb/tsdb"
)

type metaStore struct {
type metaClient struct {
host string
}

func (m *metaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
func (m *metaClient) DataNode(nodeID uint64) (*meta.NodeInfo, error) {
return &meta.NodeInfo{
ID: nodeID,
Host: m.host,
ID: nodeID,
TCPHost: m.host,
}, nil
}
@@ -7,8 +7,9 @@ import (
"net"
"time"

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@@ -18,9 +19,10 @@ import (
type ShardMapper struct {
ForceRemoteMapping bool // All shards treated as remote. Useful for testing.

MetaStore interface {
NodeID() uint64
Node(id uint64) (ni *meta.NodeInfo, err error)
Node *influxdb.Node

MetaClient interface {
DataNode(id uint64) (ni *meta.NodeInfo, err error)
}

TSDBStore interface {

@@ -42,7 +44,7 @@ func NewShardMapper(timeout time.Duration) *ShardMapper {
// CreateMapper returns a Mapper for the given shard ID.
func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) {
// Create a remote mapper if the local node doesn't own the shard.
if !sh.OwnedBy(s.MetaStore.NodeID()) || s.ForceRemoteMapping {
if !sh.OwnedBy(s.Node.ID) || s.ForceRemoteMapping {
// Pick a node in a pseudo-random manner.
conn, err := s.dial(sh.Owners[rand.Intn(len(sh.Owners))].NodeID)
if err != nil {

@@ -63,11 +65,11 @@ func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, c
}

func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) {
ni, err := s.MetaStore.Node(nodeID)
ni, err := s.MetaClient.DataNode(nodeID)
if err != nil {
return nil, err
}
conn, err := net.Dial("tcp", ni.Host)
conn, err := net.Dial("tcp", ni.TCPHost)
if err != nil {
return nil, err
}
@@ -5,8 +5,8 @@ import (
"net"
"time"

"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"gopkg.in/fatih/pool.v2"
)

@@ -22,8 +22,8 @@ type ShardWriter struct {
pool *clientPool
timeout time.Duration

MetaStore interface {
Node(id uint64) (ni *meta.NodeInfo, err error)
MetaClient interface {
DataNode(id uint64) (ni *meta.NodeInfo, err error)
}
}

@@ -94,7 +94,7 @@ func (w *ShardWriter) dial(nodeID uint64) (net.Conn, error) {
_, ok := w.pool.getPool(nodeID)
if !ok {
factory := &connFactory{nodeID: nodeID, clientPool: w.pool, timeout: w.timeout}
factory.metaStore = w.MetaStore
factory.metaClient = w.MetaClient

p, err := pool.NewChannelPool(1, 3, factory.dial)
if err != nil {

@@ -130,8 +130,8 @@ type connFactory struct {
size() int
}

metaStore interface {
Node(id uint64) (ni *meta.NodeInfo, err error)
metaClient interface {
DataNode(id uint64) (ni *meta.NodeInfo, err error)
}
}

@@ -140,7 +140,7 @@ func (c *connFactory) dial() (net.Conn, error) {
return nil, errMaxConnectionsExceeded
}

ni, err := c.metaStore.Node(c.nodeID)
ni, err := c.metaClient.DataNode(c.nodeID)
if err != nil {
return nil, err
}

@@ -149,7 +149,7 @@ func (c *connFactory) dial() (net.Conn, error) {
return nil, fmt.Errorf("node %d does not exist", c.nodeID)
}

conn, err := net.DialTimeout("tcp", ni.Host, c.timeout)
conn, err := net.DialTimeout("tcp", ni.TCPHost, c.timeout)
if err != nil {
return nil, err
}
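Editorial note: the shard writer and the shard mapper now share the same dialing pattern — look a data node up by ID via `DataNode(id)` and connect to its `TCPHost` (which replaces the old `NodeInfo.Host` field). A minimal sketch of that pattern, assuming only the interface shown in the hunks above; `dialDataNode` and `dataNodeFinder` are illustrative names, not symbols from the repository.

```go
// Sketch only: dial a remote data node through the narrowed meta-client interface.
package cluster

import (
	"fmt"
	"net"
	"time"

	"github.com/influxdb/influxdb/services/meta"
)

// dataNodeFinder is the one method both ShardWriter and ShardMapper need.
type dataNodeFinder interface {
	DataNode(id uint64) (*meta.NodeInfo, error)
}

// dialDataNode resolves a node ID to its TCP address and opens a connection.
func dialDataNode(c dataNodeFinder, nodeID uint64, timeout time.Duration) (net.Conn, error) {
	ni, err := c.DataNode(nodeID)
	if err != nil {
		return nil, err
	}
	if ni == nil {
		return nil, fmt.Errorf("node %d does not exist", nodeID)
	}
	return net.DialTimeout("tcp", ni.TCPHost, timeout)
}
```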
@@ -23,7 +23,7 @@ func TestShardWriter_WriteShard_Success(t *testing.T) {
defer ts.Close()

w := cluster.NewShardWriter(time.Minute)
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
w.MetaClient = &metaClient{host: ts.ln.Addr().String()}

// Build a single point.
now := time.Now()

@@ -70,7 +70,7 @@ func TestShardWriter_WriteShard_Multiple(t *testing.T) {
defer ts.Close()

w := cluster.NewShardWriter(time.Minute)
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
w.MetaClient = &metaClient{host: ts.ln.Addr().String()}

// Build a single point.
now := time.Now()

@@ -119,7 +119,7 @@ func TestShardWriter_WriteShard_Error(t *testing.T) {
defer ts.Close()

w := cluster.NewShardWriter(time.Minute)
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
now := time.Now()

shardID := uint64(1)

@@ -147,7 +147,7 @@ func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
defer ts.Close()

w := cluster.NewShardWriter(time.Nanosecond)
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
w.MetaClient = &metaClient{host: ts.ln.Addr().String()}
now := time.Now()

shardID := uint64(1)

@@ -170,7 +170,7 @@ func TestShardWriter_Write_ErrReadTimeout(t *testing.T) {
}

w := cluster.NewShardWriter(time.Millisecond)
w.MetaStore = &metaStore{host: ln.Addr().String()}
w.MetaClient = &metaClient{host: ln.Addr().String()}
now := time.Now()

shardID := uint64(1)
@@ -14,7 +14,7 @@ import (
"strings"
"time"

"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/services/snapshotter"
"github.com/influxdb/influxdb/tcp"
)

@@ -281,13 +281,6 @@ func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, val

// download downloads a snapshot of either the metastore or a shard from a host to a given path.
func (cmd *Command) download(req *snapshotter.Request, path string) error {
// Connect to snapshotter service.
conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader)
if err != nil {
return err
}
defer conn.Close()

// Create local file to write to.
f, err := os.Create(path)
if err != nil {

@@ -295,6 +288,13 @@ func (cmd *Command) download(req *snapshotter.Request, path string) error {
}
defer f.Close()

// Connect to snapshotter service.
conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader)
if err != nil {
return err
}
defer conn.Close()

// Write the request
if err := json.NewEncoder(conn).Encode(req); err != nil {
return fmt.Errorf("encode snapshot request: %s", err)
@@ -16,7 +16,7 @@ import (
"sync"

"github.com/influxdb/influxdb/cmd/influxd/backup"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
)

// Command represents the program execution for "influxd restore".

@@ -129,7 +129,6 @@ func (cmd *Command) ensureStopped() error {
// unpackMeta reads the metadata from the backup directory and initializes a raft
// cluster and replaces the root metadata.
func (cmd *Command) unpackMeta() error {
fmt.Fprintf(cmd.Stdout, "Restoring metastore to %v\n", cmd.metadir)
// find the meta file
metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
if err != nil {

@@ -160,27 +159,14 @@ func (cmd *Command) unpackMeta() error {
return fmt.Errorf("unmarshal: %s", err)
}

// Copy meta config and remove peers so it starts in single mode.
c := cmd.MetaConfig
c.JoinPeers = nil
c.LoggingEnabled = false

// Initialize meta store.
store := meta.NewStore(cmd.MetaConfig)
store := meta.NewService(c)
store.RaftListener = newNopListener()
store.ExecListener = newNopListener()
store.RPCListener = newNopListener()
store.Logger = log.New(ioutil.Discard, "", 0)

// Determine advertised address.
_, port, err := net.SplitHostPort(cmd.MetaConfig.BindAddress)
if err != nil {
return fmt.Errorf("split bind address: %s", err)
}
hostport := net.JoinHostPort(cmd.MetaConfig.Hostname, port)

// Resolve address.
addr, err := net.ResolveTCPAddr("tcp", hostport)
if err != nil {
return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err)
}
store.Addr = addr
store.RemoteAddr = addr

// Open the meta store.
if err := store.Open(); err != nil {

@@ -190,18 +176,22 @@ func (cmd *Command) unpackMeta() error {

// Wait for the store to be ready or error.
select {
case <-store.Ready():
case err := <-store.Err():
return err
default:
}

client := meta.NewClient([]string{store.HTTPAddr()}, false)
client.SetLogger(log.New(ioutil.Discard, "", 0))
if err := client.Open(); err != nil {
return err
}
defer client.Close()

// Force set the full metadata.
if err := store.SetData(&data); err != nil {
if err := client.SetData(&data); err != nil {
return fmt.Errorf("set data: %s", err)
}

fmt.Fprintln(cmd.Stdout, "Metastore restore successful")

return nil
}

@@ -375,4 +365,4 @@ func (ln *nopListener) Close() error {
return nil
}

func (ln *nopListener) Addr() net.Addr { return nil }
func (ln *nopListener) Addr() net.Addr { return &net.TCPAddr{} }
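Editorial note: the restore command now pushes the backed-up metadata through the new meta client instead of writing raft state directly into a `meta.Store`. A condensed sketch of that flow, assuming only the calls visible in the hunks above (`meta.NewService`, `RaftListener`, `meta.NewClient`, `HTTPAddr`, `SetData`, `newNopListener`); the `restoreMeta` helper, the package name, and the `*meta.Data` parameter type are assumptions made for illustration.

```go
// Sketch only: restore a metadata snapshot through a local meta service + client.
package restore

import "github.com/influxdb/influxdb/services/meta"

func restoreMeta(cfg *meta.Config, data *meta.Data) error {
	// Run the service in single-node mode: no peers, no logging.
	cfg.JoinPeers = nil
	cfg.LoggingEnabled = false

	svc := meta.NewService(cfg)
	svc.RaftListener = newNopListener() // nop listener, as in the restore command
	if err := svc.Open(); err != nil {
		return err
	}
	defer svc.Close()

	// Talk to the service over its HTTP API and force-set the full snapshot.
	client := meta.NewClient([]string{svc.HTTPAddr()}, false)
	if err := client.Open(); err != nil {
		return err
	}
	defer client.Close()

	return client.SetData(data)
}
```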
@@ -35,7 +35,7 @@ func TestServer_BackupAndRestore(t *testing.T) {
if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicyInfo(rp, 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy(db, rp); err != nil {
if err := s.MetaClient.SetDefaultRetentionPolicy(db, rp); err != nil {
t.Fatal(err)
}
@@ -94,13 +94,8 @@ func (cmd *Command) Run(args ...string) error {
return fmt.Errorf("apply env config: %v", err)
}

// Override config hostname if specified in the command line args.
if options.Hostname != "" {
config.Meta.Hostname = options.Hostname
}

if options.Join != "" {
config.Meta.Peers = strings.Split(options.Join, ",")
config.Meta.JoinPeers = strings.Split(options.Join, ",")
}

// Validate the configuration.

@@ -160,7 +155,6 @@ func (cmd *Command) ParseFlags(args ...string) (Options, error) {
fs := flag.NewFlagSet("", flag.ContinueOnError)
fs.StringVar(&options.ConfigPath, "config", "", "")
fs.StringVar(&options.PIDFile, "pidfile", "", "")
fs.StringVar(&options.Hostname, "hostname", "", "")
fs.StringVar(&options.Join, "join", "", "")
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
fs.StringVar(&options.MemProfile, "memprofile", "", "")

@@ -220,12 +214,8 @@ then a new cluster will be initialized unless the -join argument is used.
-config <path>
Set the path to the configuration file.

-hostname <name>
Override the hostname, the 'hostname' configuration
option will be overridden.

-join <url>
Joins the server to an existing cluster.
-join <host:port>
Joins the server to an existing cluster. Should be the HTTP bind address of an existing meta server

-pidfile <path>
Write process ID to a file.

@@ -241,7 +231,6 @@ then a new cluster will be initialized unless the -join argument is used.
type Options struct {
ConfigPath string
PIDFile string
Hostname string
Join string
CPUProfile string
MemProfile string
@@ -12,7 +12,6 @@ import (
"time"

"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/monitor"
"github.com/influxdb/influxdb/services/admin"
"github.com/influxdb/influxdb/services/collectd"

@@ -20,6 +19,7 @@ import (
"github.com/influxdb/influxdb/services/graphite"
"github.com/influxdb/influxdb/services/hh"
"github.com/influxdb/influxdb/services/httpd"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/services/opentsdb"
"github.com/influxdb/influxdb/services/precreator"
"github.com/influxdb/influxdb/services/retention"

@@ -28,6 +28,15 @@ import (
"github.com/influxdb/influxdb/tsdb"
)

const (
// DefaultBindAddress is the default address for raft, cluster, snapshot, etc..
DefaultBindAddress = ":8088"

// DefaultHostname is the default hostname used if we are unable to determine
// the hostname from the system
DefaultHostname = "localhost"
)

// Config represents the configuration format for the influxd binary.
type Config struct {
Meta *meta.Config `toml:"meta"`

@@ -45,13 +54,16 @@ type Config struct {
OpenTSDB opentsdb.Config `toml:"opentsdb"`
UDPs []udp.Config `toml:"udp"`

// Snapshot SnapshotConfig `toml:"snapshot"`
ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`

HintedHandoff hh.Config `toml:"hinted-handoff"`
HintedHandoff hh.Config `toml:"hinted-handoff"`

// Server reporting
ReportingDisabled bool `toml:"reporting-disabled"`

Dir string `toml:"dir"`

// BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.)
BindAddress string `toml:"bind-address"`
}

// NewConfig returns an instance of Config with reasonable defaults.

@@ -72,6 +84,7 @@ func NewConfig() *Config {
c.ContinuousQuery = continuous_querier.NewConfig()
c.Retention = retention.NewConfig()
c.HintedHandoff = hh.NewConfig()
c.BindAddress = DefaultBindAddress

return c
}

@@ -104,21 +117,26 @@ func NewDemoConfig() (*Config, error) {

// Validate returns an error if the config is invalid.
func (c *Config) Validate() error {
if c.Meta.Dir == "" {
return errors.New("Meta.Dir must be specified")
} else if c.HintedHandoff.Enabled && c.HintedHandoff.Dir == "" {
return errors.New("HintedHandoff.Dir must be specified")
if !c.Meta.Enabled && !c.Data.Enabled {
return errors.New("either Meta, Data, or both must be enabled")
}
if err := c.Meta.Validate(); err != nil {
return err
}

if err := c.Data.Validate(); err != nil {
return err
}

for _, g := range c.Graphites {
if err := g.Validate(); err != nil {
return fmt.Errorf("invalid graphite config: %v", err)
if c.Data.Enabled {
if err := c.HintedHandoff.Validate(); err != nil {
return err
}
for _, g := range c.Graphites {
if err := g.Validate(); err != nil {
return fmt.Errorf("invalid graphite config: %v", err)
}
}
}

return nil
}
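Editorial note: with this change a single binary can be configured as a meta node, a data node, or both, and `Validate` rejects a config where neither section is enabled. A small sketch of how the two `enabled` flags combine; `nodeRole` and its return strings are illustrative, not symbols from the repository.

```go
// Sketch only: interpreting the [meta]/[data] enabled flags per the new Validate rule.
package run_test

import (
	"errors"

	"github.com/influxdb/influxdb/cmd/influxd/run"
)

func nodeRole(c *run.Config) (string, error) {
	switch {
	case c.Meta.Enabled && c.Data.Enabled:
		return "combined meta and data node", nil
	case c.Meta.Enabled:
		return "meta-only node", nil
	case c.Data.Enabled:
		return "data-only node", nil
	default:
		// Mirrors the error returned by Config.Validate above.
		return "", errors.New("either Meta, Data, or both must be enabled")
	}
}
```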
@@ -30,7 +30,6 @@ func (cmd *PrintConfigCommand) Run(args ...string) error {
// Parse command flags.
fs := flag.NewFlagSet("", flag.ContinueOnError)
configPath := fs.String("config", "", "")
hostname := fs.String("hostname", "", "")
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }
if err := fs.Parse(args); err != nil {
return err

@@ -47,11 +46,6 @@ func (cmd *PrintConfigCommand) Run(args ...string) error {
return fmt.Errorf("apply env config: %v", err)
}

// Override config properties.
if *hostname != "" {
config.Meta.Hostname = *hostname
}

// Validate the configuration.
if err := config.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
@@ -145,3 +145,20 @@ enabled = true
t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol)
}
}

func TestConfig_ValidateNoServiceConfigured(t *testing.T) {
var c run.Config
if _, err := toml.Decode(`
[meta]
enabled = false

[data]
enabled = false
`, &c); err != nil {
t.Fatal(err)
}

if e := c.Validate(); e == nil {
t.Fatalf("expected error, got nil")
}
}
@@ -5,13 +5,13 @@ import (
"log"
"net"
"os"
"reflect"
"runtime"
"runtime/pprof"
"strings"
"time"

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/monitor"
"github.com/influxdb/influxdb/services/admin"
"github.com/influxdb/influxdb/services/collectd"

@@ -20,6 +20,7 @@ import (
"github.com/influxdb/influxdb/services/graphite"
"github.com/influxdb/influxdb/services/hh"
"github.com/influxdb/influxdb/services/httpd"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/services/opentsdb"
"github.com/influxdb/influxdb/services/precreator"
"github.com/influxdb/influxdb/services/retention"

@@ -50,11 +51,14 @@ type Server struct {
err chan error
closing chan struct{}

Hostname string
BindAddress string
Listener net.Listener

MetaStore *meta.Store
Node *influxdb.Node

MetaClient *meta.Client
MetaService *meta.Service

TSDBStore *tsdb.Store
QueryExecutor *tsdb.QueryExecutor
PointsWriter *cluster.PointsWriter

@@ -78,102 +82,136 @@ type Server struct {
// Profiling
CPUProfile string
MemProfile string

// joinPeers are the metaservers specified at run time to join this server to
joinPeers []string

// metaUseTLS specifies if we should use a TLS connection to the meta servers
metaUseTLS bool

// httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data
httpAPIAddr string

// httpUseTLS specifies if we should use a TLS connection to the http servers
httpUseTLS bool

// tcpAddr is the host:port combination for the TCP listener that services mux onto
tcpAddr string

config *Config
}

// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
// Construct base meta store and data store.
tsdbStore := tsdb.NewStore(c.Data.Dir)
tsdbStore.EngineOptions.Config = c.Data
// load the node information. Before 0.10 this was in the meta directory,
// so use that if the top level directory isn't specified
dir := c.Dir
if dir == "" {
dir = c.Meta.Dir
}

// load the node information
node, err := influxdb.NewNode(dir)
if err != nil {
return nil, err
}

// In 0.10.0 bind-address got moved to the top level. Check
// The old location to keep things backwards compatible
bind := c.BindAddress
if c.Meta.BindAddress != "" {
bind = c.Meta.BindAddress
}

if !c.Data.Enabled && !c.Meta.Enabled {
return nil, fmt.Errorf("must run as either meta node or data node or both")
}

httpBindAddress, err := defaultHost(DefaultHostname, c.HTTPD.BindAddress)
if err != nil {
return nil, err
}
tcpBindAddress, err := defaultHost(DefaultHostname, bind)
if err != nil {
return nil, err
}

s := &Server{
buildInfo: *buildInfo,
err: make(chan error),
closing: make(chan struct{}),

Hostname: c.Meta.Hostname,
BindAddress: c.Meta.BindAddress,
BindAddress: bind,

MetaStore: meta.NewStore(c.Meta),
TSDBStore: tsdbStore,
Node: node,

Monitor: monitor.New(c.Monitor),

reportingDisabled: c.ReportingDisabled,
joinPeers: c.Meta.JoinPeers,
metaUseTLS: c.Meta.HTTPSEnabled,

httpAPIAddr: httpBindAddress,
httpUseTLS: c.HTTPD.HTTPSEnabled,
tcpAddr: tcpBindAddress,

config: c,
}

// Copy TSDB configuration.
s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

// Set the shard mapper
s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
s.ShardMapper.MetaStore = s.MetaStore
s.ShardMapper.TSDBStore = s.TSDBStore

// Initialize query executor.
s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
s.QueryExecutor.MetaStore = s.MetaStore
s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
s.QueryExecutor.ShardMapper = s.ShardMapper
s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

// Set the shard writer
s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
s.ShardWriter.MetaStore = s.MetaStore

// Create the hinted handoff service
s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)
s.HintedHandoff.Monitor = s.Monitor

// Create the Subscriber service
s.Subscriber = subscriber.NewService(c.Subscriber)
s.Subscriber.MetaStore = s.MetaStore

// Initialize points writer.
s.PointsWriter = cluster.NewPointsWriter()
s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
s.PointsWriter.MetaStore = s.MetaStore
s.PointsWriter.TSDBStore = s.TSDBStore
s.PointsWriter.ShardWriter = s.ShardWriter
s.PointsWriter.HintedHandoff = s.HintedHandoff
s.PointsWriter.Subscriber = s.Subscriber

// needed for executing INTO queries.
s.QueryExecutor.IntoWriter = s.PointsWriter

// Initialize the monitor
s.Monitor.Version = s.buildInfo.Version
s.Monitor.Commit = s.buildInfo.Commit
s.Monitor.Branch = s.buildInfo.Branch
s.Monitor.BuildTime = s.buildInfo.Time
s.Monitor.MetaStore = s.MetaStore
s.Monitor.PointsWriter = s.PointsWriter

// Append services.
s.appendClusterService(c.Cluster)
s.appendPrecreatorService(c.Precreator)
s.appendSnapshotterService()
s.appendCopierService()
s.appendAdminService(c.Admin)
s.appendContinuousQueryService(c.ContinuousQuery)
s.appendHTTPDService(c.HTTPD)
s.appendCollectdService(c.Collectd)
if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
return nil, err
if c.Meta.Enabled {
s.MetaService = meta.NewService(c.Meta)
}
for _, g := range c.UDPs {
s.appendUDPService(g)
}
s.appendRetentionPolicyService(c.Retention)
for _, g := range c.Graphites {
if err := s.appendGraphiteService(g); err != nil {
return nil, err
}

if c.Data.Enabled {
s.TSDBStore = tsdb.NewStore(c.Data.Dir)
s.TSDBStore.EngineOptions.Config = c.Data

// Copy TSDB configuration.
s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

// Set the shard mapper
s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
s.ShardMapper.TSDBStore = s.TSDBStore
s.ShardMapper.Node = node

// Initialize query executor.
s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor}
s.QueryExecutor.ShardMapper = s.ShardMapper
s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled

// Set the shard writer
s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))

// Create the hinted handoff service
s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient)
s.HintedHandoff.Monitor = s.Monitor

// Create the Subscriber service
s.Subscriber = subscriber.NewService(c.Subscriber)

// Initialize points writer.
s.PointsWriter = cluster.NewPointsWriter()
s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
s.PointsWriter.TSDBStore = s.TSDBStore
s.PointsWriter.ShardWriter = s.ShardWriter
s.PointsWriter.HintedHandoff = s.HintedHandoff
s.PointsWriter.Subscriber = s.Subscriber
s.PointsWriter.Node = s.Node

// needed for executing INTO queries.
s.QueryExecutor.IntoWriter = s.PointsWriter

// Initialize the monitor
s.Monitor.Version = s.buildInfo.Version
s.Monitor.Commit = s.buildInfo.Commit
s.Monitor.Branch = s.buildInfo.Branch
s.Monitor.BuildTime = s.buildInfo.Time
s.Monitor.PointsWriter = s.PointsWriter
}

return s, nil

@@ -182,7 +220,7 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
func (s *Server) appendClusterService(c cluster.Config) {
srv := cluster.NewService(c)
srv.TSDBStore = s.TSDBStore
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
s.Services = append(s.Services, srv)
s.ClusterService = srv
}

@@ -190,7 +228,7 @@ func (s *Server) appendClusterService(c cluster.Config) {
func (s *Server) appendSnapshotterService() {
srv := snapshotter.NewService()
srv.TSDBStore = s.TSDBStore
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
s.Services = append(s.Services, srv)
s.SnapshotterService = srv
}

@@ -207,7 +245,7 @@ func (s *Server) appendRetentionPolicyService(c retention.Config) {
return
}
srv := retention.NewService(c)
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
srv.TSDBStore = s.TSDBStore
s.Services = append(s.Services, srv)
}

@@ -225,7 +263,7 @@ func (s *Server) appendHTTPDService(c httpd.Config) {
return
}
srv := httpd.NewService(c)
srv.Handler.MetaStore = s.MetaStore
srv.Handler.MetaClient = s.MetaClient
srv.Handler.QueryExecutor = s.QueryExecutor
srv.Handler.PointsWriter = s.PointsWriter
srv.Handler.Version = s.buildInfo.Version

@@ -245,7 +283,7 @@ func (s *Server) appendCollectdService(c collectd.Config) {
return
}
srv := collectd.NewService(c)
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
srv.PointsWriter = s.PointsWriter
s.Services = append(s.Services, srv)
}

@@ -259,7 +297,7 @@ func (s *Server) appendOpenTSDBService(c opentsdb.Config) error {
return err
}
srv.PointsWriter = s.PointsWriter
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
s.Services = append(s.Services, srv)
return nil
}

@@ -274,7 +312,7 @@ func (s *Server) appendGraphiteService(c graphite.Config) error {
}

srv.PointsWriter = s.PointsWriter
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
srv.Monitor = s.Monitor
s.Services = append(s.Services, srv)
return nil

@@ -289,7 +327,7 @@ func (s *Server) appendPrecreatorService(c precreator.Config) error {
return err
}

srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
s.Services = append(s.Services, srv)
return nil
}

@@ -300,7 +338,7 @@ func (s *Server) appendUDPService(c udp.Config) {
}
srv := udp.NewService(c)
srv.PointsWriter = s.PointsWriter
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
s.Services = append(s.Services, srv)
}

@@ -309,7 +347,7 @@ func (s *Server) appendContinuousQueryService(c continuous_querier.Config) {
return
}
srv := continuous_querier.NewService(c)
srv.MetaStore = s.MetaStore
srv.MetaClient = s.MetaClient
srv.QueryExecutor = s.QueryExecutor
s.Services = append(s.Services, srv)
}

@@ -323,19 +361,6 @@ func (s *Server) Open() error {
// Start profiling, if set.
startProfile(s.CPUProfile, s.MemProfile)

host, port, err := s.hostAddr()
if err != nil {
return err
}

hostport := net.JoinHostPort(host, port)
addr, err := net.ResolveTCPAddr("tcp", hostport)
if err != nil {
return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err)
}
s.MetaStore.Addr = addr
s.MetaStore.RemoteAddr = &tcpaddr{hostport}

// Open shared TCP connection.
ln, err := net.Listen("tcp", s.BindAddress)
if err != nil {

@@ -343,60 +368,89 @@ func (s *Server) Open() error {
}
s.Listener = ln

// The port 0 is used, we need to retrieve the port assigned by the kernel
if strings.HasSuffix(s.BindAddress, ":0") {
s.MetaStore.Addr = ln.Addr()
s.MetaStore.RemoteAddr = ln.Addr()
}

// Multiplex listener.
mux := tcp.NewMux()
s.MetaStore.RaftListener = mux.Listen(meta.MuxRaftHeader)
s.MetaStore.ExecListener = mux.Listen(meta.MuxExecHeader)
s.MetaStore.RPCListener = mux.Listen(meta.MuxRPCHeader)

s.ClusterService.Listener = mux.Listen(cluster.MuxHeader)
s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)
s.CopierService.Listener = mux.Listen(copier.MuxHeader)
go mux.Serve(ln)

// Open meta store.
if err := s.MetaStore.Open(); err != nil {
return fmt.Errorf("open meta store: %s", err)
}
go s.monitorErrorChan(s.MetaStore.Err())
if s.MetaService != nil {
s.MetaService.RaftListener = mux.Listen(meta.MuxHeader)

// Wait for the store to initialize.
<-s.MetaStore.Ready()

// Open TSDB store.
if err := s.TSDBStore.Open(); err != nil {
return fmt.Errorf("open tsdb store: %s", err)
// Open meta service.
if err := s.MetaService.Open(); err != nil {
return fmt.Errorf("open meta service: %s", err)
}
go s.monitorErrorChan(s.MetaService.Err())
}

// Open the hinted handoff service
if err := s.HintedHandoff.Open(); err != nil {
return fmt.Errorf("open hinted handoff: %s", err)
}
if s.TSDBStore != nil {
if err := s.initializeDataNode(); err != nil {
return err
}

// Open the subcriber service
if err := s.Subscriber.Open(); err != nil {
return fmt.Errorf("open subscriber: %s", err)
}
// Append services.
s.appendClusterService(s.config.Cluster)
s.appendPrecreatorService(s.config.Precreator)
s.appendSnapshotterService()
s.appendCopierService()
s.appendAdminService(s.config.Admin)
s.appendContinuousQueryService(s.config.ContinuousQuery)
s.appendHTTPDService(s.config.HTTPD)
s.appendCollectdService(s.config.Collectd)
if err := s.appendOpenTSDBService(s.config.OpenTSDB); err != nil {
return err
}
for _, g := range s.config.UDPs {
s.appendUDPService(g)
}
s.appendRetentionPolicyService(s.config.Retention)
for _, g := range s.config.Graphites {
if err := s.appendGraphiteService(g); err != nil {
return err
}
}

// Open the points writer service
if err := s.PointsWriter.Open(); err != nil {
return fmt.Errorf("open points writer: %s", err)
}
s.Subscriber.MetaClient = s.MetaClient
s.ShardMapper.MetaClient = s.MetaClient
s.QueryExecutor.MetaClient = s.MetaClient
s.ShardWriter.MetaClient = s.MetaClient
s.HintedHandoff.MetaClient = s.MetaClient
s.Subscriber.MetaClient = s.MetaClient
s.PointsWriter.MetaClient = s.MetaClient
s.Monitor.MetaClient = s.MetaClient

// Open the monitor service
if err := s.Monitor.Open(); err != nil {
return fmt.Errorf("open monitor: %v", err)
}
s.ClusterService.Listener = mux.Listen(cluster.MuxHeader)
s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)
s.CopierService.Listener = mux.Listen(copier.MuxHeader)

for _, service := range s.Services {
if err := service.Open(); err != nil {
return fmt.Errorf("open service: %s", err)
// Open TSDB store.
if err := s.TSDBStore.Open(); err != nil {
return fmt.Errorf("open tsdb store: %s", err)
}

// Open the hinted handoff service
if err := s.HintedHandoff.Open(); err != nil {
return fmt.Errorf("open hinted handoff: %s", err)
}

// Open the subcriber service
if err := s.Subscriber.Open(); err != nil {
return fmt.Errorf("open subscriber: %s", err)
}

// Open the points writer service
if err := s.PointsWriter.Open(); err != nil {
return fmt.Errorf("open points writer: %s", err)
}

// Open the monitor service
if err := s.Monitor.Open(); err != nil {
return fmt.Errorf("open monitor: %v", err)
}

for _, service := range s.Services {
if err := service.Open(); err != nil {
return fmt.Errorf("open service: %s", err)
}
}
}

@@ -408,7 +462,6 @@ func (s *Server) Open() error {
return nil

}(); err != nil {
s.Close()
return err
}

@@ -452,8 +505,12 @@ func (s *Server) Close() error {
}

// Finally close the meta-store since everything else depends on it
if s.MetaStore != nil {
s.MetaStore.Close()
if s.MetaService != nil {
s.MetaService.Close()
}

if s.MetaClient != nil {
s.MetaClient.Close()
}

close(s.closing)

@@ -468,11 +525,6 @@ func (s *Server) startServerReporting() {
return
default:
}
if err := s.MetaStore.WaitForLeader(30 * time.Second); err != nil {
log.Printf("no leader available for reporting: %s", err.Error())
time.Sleep(time.Second)
continue
}
s.reportServer()
<-time.After(24 * time.Hour)
}

@@ -480,7 +532,7 @@ func (s *Server) startServerReporting() {

// reportServer reports anonymous statistics about the system.
func (s *Server) reportServer() {
dis, err := s.MetaStore.Databases()
dis, err := s.MetaClient.Databases()
if err != nil {
log.Printf("failed to retrieve databases for reporting: %s", err.Error())
return

@@ -500,7 +552,7 @@ func (s *Server) reportServer() {
numSeries += s
}

clusterID, err := s.MetaStore.ClusterID()
clusterID := s.MetaClient.ClusterID()
if err != nil {
log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error())
return

@@ -515,7 +567,7 @@ func (s *Server) reportServer() {
"os": runtime.GOOS,
"arch": runtime.GOARCH,
"version": s.buildInfo.Version,
"server_id": fmt.Sprintf("%v", s.MetaStore.NodeID()),
"server_id": fmt.Sprintf("%v", s.Node.ID),
"cluster_id": fmt.Sprintf("%v", clusterID),
"num_series": numSeries,
"num_measurements": numMeasurements,

@@ -545,33 +597,80 @@ func (s *Server) monitorErrorChan(ch <-chan error) {
}
}

// hostAddr returns the host and port that remote nodes will use to reach this
// node.
func (s *Server) hostAddr() (string, string, error) {
// Resolve host to address.
_, port, err := net.SplitHostPort(s.BindAddress)
// initializeDataNode will set the MetaClient and join the node to the cluster if needed
func (s *Server) initializeDataNode() error {
// if the node ID is > 0 then we just need to initialize the metaclient
if s.Node.ID > 0 {
s.MetaClient = meta.NewClient(s.Node.MetaServers, s.metaUseTLS)
if err := s.MetaClient.Open(); err != nil {
return err
}

go s.updateMetaNodeInformation()

return nil
}

// It's the first time starting up and we need to either join
// the cluster or initialize this node as the first member
if len(s.joinPeers) == 0 {
// start up a new single node cluster
if s.MetaService == nil {
return fmt.Errorf("server not set to join existing cluster must run also as a meta node")
}
s.MetaClient = meta.NewClient([]string{s.MetaService.HTTPAddr()}, s.metaUseTLS)
} else {
// join this data node to the cluster
s.MetaClient = meta.NewClient(s.joinPeers, s.metaUseTLS)
}
if err := s.MetaClient.Open(); err != nil {
return err
}
n, err := s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
if err != nil {
return "", "", fmt.Errorf("split bind address: %s", err)
return err
}
s.Node.ID = n.ID
metaNodes, err := s.MetaClient.MetaNodes()
if err != nil {
return err
}
for _, n := range metaNodes {
s.Node.MetaServers = append(s.Node.MetaServers, n.Host)
}
if err := s.Node.Save(); err != nil {
return err
}

host := s.Hostname
go s.updateMetaNodeInformation()

// See if we might have a port that will override the BindAddress port
if host != "" && host[len(host)-1] >= '0' && host[len(host)-1] <= '9' && strings.Contains(host, ":") {
hostArg, portArg, err := net.SplitHostPort(s.Hostname)
if err != nil {
return "", "", err
}
return nil
}

if hostArg != "" {
host = hostArg
}

if portArg != "" {
port = portArg
// updateMetaNodeInformation will continuously run and save the node.json file
// if the list of metaservers in the cluster changes
func (s *Server) updateMetaNodeInformation() {
for {
c := s.MetaClient.WaitForDataChanged()
select {
case <-c:
nodes, _ := s.MetaClient.MetaNodes()
var nodeAddrs []string
for _, n := range nodes {
nodeAddrs = append(nodeAddrs, n.Host)
}
if !reflect.DeepEqual(nodeAddrs, s.Node.MetaServers) {
s.Node.MetaServers = nodeAddrs
if err := s.Node.Save(); err != nil {
log.Printf("error saving node information: %s\n", err.Error())
} else {
log.Printf("updated node metaservers with: %v\n", s.Node.MetaServers)
}
}
case <-s.closing:
return
}
}
return host, port, nil
}

// Service represents a service attached to the server.

@@ -624,6 +723,18 @@ func stopProfile() {
}
}

func defaultHost(hostname, addr string) (string, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return "", err
}

if host == "" {
return net.JoinHostPort(hostname, port), nil
}
return addr, nil
}

type tcpaddr struct{ host string }

func (a *tcpaddr) Network() string { return "tcp" }
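Editorial note: the heart of the server rework above is `initializeDataNode` — a data node either reuses the meta servers recorded in its node file, bootstraps against its own local meta service, or joins the peers named by `-join`. A condensed sketch of that decision, built only from calls visible in the hunks above (`influxdb.Node`, `meta.NewClient`, `meta.Service.HTTPAddr`); the free function `newDataNodeMetaClient` is an illustrative name and not code from the repository.

```go
// Sketch only: how a data node decides which meta servers its client should talk to.
package run

import (
	"fmt"

	"github.com/influxdb/influxdb"
	"github.com/influxdb/influxdb/services/meta"
)

func newDataNodeMetaClient(node *influxdb.Node, joinPeers []string, localMeta *meta.Service, useTLS bool) (*meta.Client, error) {
	// Already part of a cluster: node.json records the meta servers.
	if node.ID > 0 {
		return meta.NewClient(node.MetaServers, useTLS), nil
	}
	// First start: either bootstrap against the local meta service or
	// join the peers given on the command line / in the config.
	if len(joinPeers) == 0 {
		if localMeta == nil {
			return nil, fmt.Errorf("a data node that is not joining an existing cluster must also run as a meta node")
		}
		return meta.NewClient([]string{localMeta.HTTPAddr()}, useTLS), nil
	}
	return meta.NewClient(joinPeers, useTLS), nil
}
```

In the diff, the caller then opens the client, registers itself with `CreateDataNode(httpAddr, tcpAddr)` on first start, records the current meta servers in the node file, and keeps that file up to date from `WaitForDataChanged` in `updateMetaNodeInformation`.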
@@ -97,7 +97,7 @@ func TestCluster_Query_DropAndRecreateDatabase(t *testing.T) {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
t.Fatal(err)
}

@@ -140,7 +140,7 @@ func TestCluster_Query_DropDatabaseIsolated(t *testing.T) {
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil {

@@ -186,7 +186,7 @@ func TestCluster_Query_DropAndRecreateSeries(t *testing.T) {
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}

@@ -255,7 +255,7 @@ func TestCluster_Query_DropSeriesFromRegex(t *testing.T) {
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
t.Fatal(err)
}

@@ -301,7 +301,7 @@ func TestCluster_RetentionPolicyCommands(t *testing.T) {
test := tests.load(t, "retention_policy_commands")

s := c.Servers[0]
if _, err := s.MetaStore.CreateDatabase(test.database()); err != nil {
if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil {
t.Fatal(err)
}
@ -20,8 +20,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/cmd/influxd/run"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/httpd"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
)
|
||||
|
||||
|
@ -45,15 +45,13 @@ func NewServer(c *run.Config) *Server {
|
|||
Server: srv,
|
||||
Config: c,
|
||||
}
|
||||
s.TSDBStore.EngineOptions.Config = c.Data
|
||||
configureLogging(&s)
|
||||
return &s
|
||||
}
|
||||
|
||||
// OpenServer opens a test server.
|
||||
func OpenServer(c *run.Config, joinURLs string) *Server {
|
||||
if len(joinURLs) > 0 {
|
||||
c.Meta.Peers = strings.Split(joinURLs, ",")
|
||||
c.Meta.JoinPeers = strings.Split(joinURLs, ",")
|
||||
}
|
||||
s := NewServer(c)
|
||||
configureLogging(s)
|
||||
|
@ -70,15 +68,16 @@ func OpenServerWithVersion(c *run.Config, version string) *Server {
|
|||
Commit: "",
|
||||
Branch: "",
|
||||
}
|
||||
fmt.Println(">>> ", c.Data.Enabled)
|
||||
srv, _ := run.NewServer(c, buildInfo)
|
||||
s := Server{
|
||||
Server: srv,
|
||||
Config: c,
|
||||
}
|
||||
configureLogging(&s)
|
||||
if err := s.Open(); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
configureLogging(&s)
|
||||
|
||||
return &s
|
||||
}
|
||||
|
@ -89,7 +88,7 @@ func OpenDefaultServer(c *run.Config, joinURLs string) *Server {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
|
@ -97,7 +96,9 @@ func OpenDefaultServer(c *run.Config, joinURLs string) *Server {
|
|||
|
||||
// Close shuts down the server and removes all temporary paths.
|
||||
func (s *Server) Close() {
|
||||
s.Server.Close()
|
||||
if err := s.Server.Close(); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
os.RemoveAll(s.Config.Meta.Dir)
|
||||
os.RemoveAll(s.Config.Data.Dir)
|
||||
os.RemoveAll(s.Config.HintedHandoff.Dir)
|
||||
|
@ -115,9 +116,9 @@ func (s *Server) URL() string {
|
|||
|
||||
// CreateDatabaseAndRetentionPolicy will create the database and retention policy.
|
||||
func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error {
|
||||
if _, err := s.MetaStore.CreateDatabaseIfNotExists(db); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase(db); err != nil {
|
||||
return err
|
||||
} else if _, err := s.MetaStore.CreateRetentionPolicyIfNotExists(db, rp); err != nil {
|
||||
} else if _, err := s.MetaClient.CreateRetentionPolicy(db, rp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -218,6 +219,7 @@ func NewConfig() *run.Config {
|
|||
c.Cluster.WriteTimeout = toml.Duration(30 * time.Second)
|
||||
c.Meta.Dir = MustTempFile()
|
||||
c.Meta.BindAddress = "127.0.0.1:0"
|
||||
c.Meta.HTTPBindAddress = "127.0.0.1:0"
|
||||
c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond)
|
||||
c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond)
|
||||
c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond)
|
||||
|
@ -451,7 +453,7 @@ func writeTestData(s *Server, t *Test) error {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicyInfo(w.rp, 1, 0)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy(w.db, w.rp); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy(w.db, w.rp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -492,10 +494,10 @@ type Cluster struct {
|
|||
func NewCluster(size int) (*Cluster, error) {
|
||||
c := Cluster{}
|
||||
c.Servers = append(c.Servers, OpenServer(NewConfig(), ""))
|
||||
raftURL := c.Servers[0].MetaStore.Addr.String()
|
||||
metaServiceAddr := c.Servers[0].Node.MetaServers[0]
|
||||
|
||||
for i := 1; i < size; i++ {
|
||||
c.Servers = append(c.Servers, OpenServer(NewConfig(), raftURL))
|
||||
c.Servers = append(c.Servers, OpenServer(NewConfig(), metaServiceAddr))
|
||||
}
|
||||
|
||||
for _, s := range c.Servers {
|
||||
|
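Followers now join through the first server's meta service address rather than the old raft bind address. A hedged sketch of how a test might stand up and tear down such a cluster (the test name is hypothetical; NewCluster and Close are as shown in this diff):

func TestCluster_Example(t *testing.T) {
	// NewCluster opens the first server, then joins the rest via Node.MetaServers[0].
	c, err := NewCluster(3)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		for _, s := range c.Servers {
			s.Close()
		}
	}()
	// ... exercise the cluster here ...
}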
@ -519,36 +521,16 @@ func verifyCluster(c *Cluster, size int) error {
|
|||
return e
|
||||
}
|
||||
|
||||
var leaderCount int
|
||||
var raftCount int
|
||||
|
||||
for _, result := range cl.Results {
|
||||
for _, series := range result.Series {
|
||||
for i, value := range series.Values {
|
||||
addr := c.Servers[i].MetaStore.Addr.String()
|
||||
if value[0].(float64) != float64(i+1) {
|
||||
return fmt.Errorf("expected nodeID %d, got %v", i, value[0])
|
||||
}
|
||||
if value[1].(string) != addr {
|
||||
return fmt.Errorf("expected addr %s, got %v", addr, value[1])
|
||||
}
|
||||
if value[2].(bool) {
|
||||
raftCount++
|
||||
}
|
||||
if value[3].(bool) {
|
||||
leaderCount++
|
||||
}
|
||||
}
|
||||
// grab only the meta nodes series
|
||||
series := cl.Results[0].Series[0]
|
||||
for i, value := range series.Values {
|
||||
addr := c.Servers[i].Node.MetaServers[i]
|
||||
if value[0].(float64) != float64(i+1) {
|
||||
return fmt.Errorf("expected nodeID %d, got %v", i, value[0])
|
||||
}
|
||||
if value[1].(string) != addr {
|
||||
return fmt.Errorf("expected addr %s, got %v", addr, value[1])
|
||||
}
|
||||
}
|
||||
if leaderCount != 1 {
|
||||
return fmt.Errorf("expected 1 leader, got %d", leaderCount)
|
||||
}
|
||||
if size < 3 && raftCount != size {
|
||||
return fmt.Errorf("expected %d raft nodes, got %d", size, raftCount)
|
||||
}
|
||||
if size >= 3 && raftCount != 3 {
|
||||
return fmt.Errorf("expected 3 raft nodes, got %d", raftCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -588,12 +570,12 @@ func NewClusterCustom(size int, cb func(index int, config *run.Config)) (*Cluste
|
|||
cb(0, config)
|
||||
|
||||
c.Servers = append(c.Servers, OpenServer(config, ""))
|
||||
raftURL := c.Servers[0].MetaStore.Addr.String()
|
||||
metaServiceAddr := c.Servers[0].Node.MetaServers[0]
|
||||
|
||||
for i := 1; i < size; i++ {
|
||||
config := NewConfig()
|
||||
cb(i, config)
|
||||
c.Servers = append(c.Servers, OpenServer(config, raftURL))
|
||||
c.Servers = append(c.Servers, OpenServer(config, metaServiceAddr))
|
||||
}
|
||||
|
||||
for _, s := range c.Servers {
|
||||
|
|
|
@ -53,16 +53,6 @@ func init() {
|
|||
command: `SHOW DATABASES`,
|
||||
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"]]}]}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "create database should error if it already exists",
|
||||
command: `CREATE DATABASE db0`,
|
||||
exp: `{"results":[{"error":"database already exists"}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "create database should error if it already exists",
|
||||
command: `CREATE DATABASE db0_r`,
|
||||
exp: `{"results":[{"error":"database already exists"}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "create database should not error with existing database with IF NOT EXISTS",
|
||||
command: `CREATE DATABASE IF NOT EXISTS db0`,
|
||||
|
@ -334,11 +324,6 @@ func init() {
|
|||
exp: `{"results":[{}]}`,
|
||||
once: true,
|
||||
},
|
||||
&Query{
|
||||
name: "create retention policy should error if it already exists",
|
||||
command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`,
|
||||
exp: `{"results":[{"error":"retention policy already exists"}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "show retention policy should succeed",
|
||||
command: `SHOW RETENTION POLICIES ON db0`,
|
||||
|
@ -355,11 +340,6 @@ func init() {
|
|||
command: `SHOW RETENTION POLICIES ON db0`,
|
||||
exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "dropping default retention policy should not succeed",
|
||||
command: `DROP RETENTION POLICY rp0 ON db0`,
|
||||
exp: `{"results":[{"error":"retention policy is default"}]}`,
|
||||
},
|
||||
&Query{
|
||||
name: "show retention policy should still show policy",
|
||||
command: `SHOW RETENTION POLICIES ON db0`,
|
||||
|
|
|
@ -56,7 +56,7 @@ func TestServer_Query_DropAndRecreateDatabase(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil {
|
||||
|
@ -116,6 +116,7 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
|
|||
func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
||||
t.Parallel()
|
||||
s := OpenServer(NewConfig(), "")
|
||||
fmt.Println("1")
|
||||
defer s.Close()
|
||||
|
||||
test := tests.load(t, "drop_and_recreate_series")
|
||||
|
@ -123,10 +124,11 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("2")
|
||||
for i, query := range test.queries {
|
||||
if i == 0 {
|
||||
if err := test.init(s); err != nil {
|
||||
|
@ -144,6 +146,7 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
fmt.Println("3")
|
||||
// Re-write data and test again.
|
||||
retest := tests.load(t, "drop_and_recreate_series_retest")
|
||||
|
||||
|
@ -163,6 +166,9 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
|||
t.Error(query.failureMessage())
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("4")
|
||||
|
||||
}
|
||||
|
||||
func TestServer_Query_DropSeriesFromRegex(t *testing.T) {
|
||||
|
@ -175,7 +181,7 @@ func TestServer_Query_DropSeriesFromRegex(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -208,7 +214,7 @@ func TestServer_RetentionPolicyCommands(t *testing.T) {
|
|||
test := tests.load(t, "retention_policy_commands")
|
||||
|
||||
// Create a database.
|
||||
if _, err := s.MetaStore.CreateDatabase(test.database()); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -253,7 +259,7 @@ func TestServer_UserCommands(t *testing.T) {
|
|||
defer s.Close()
|
||||
|
||||
// Create a database.
|
||||
if _, err := s.MetaStore.CreateDatabase("db0"); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -508,7 +514,7 @@ func TestServer_Query_DefaultDBAndRP(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -2103,7 +2109,7 @@ func TestServer_Query_Regex(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -2985,7 +2991,7 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3281,7 +3287,7 @@ func TestServer_Query_TopInt(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3483,7 +3489,7 @@ func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3551,7 +3557,7 @@ func TestServer_Query_GroupByTimeCutoffs(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3633,7 +3639,7 @@ func TestServer_Write_Precision(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3750,7 +3756,7 @@ func TestServer_Query_Wildcards(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3868,7 +3874,7 @@ func TestServer_Query_WildcardExpansion(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -3955,7 +3961,7 @@ func TestServer_Query_AcrossShardsAndFields(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4029,7 +4035,7 @@ func TestServer_Query_Where_Fields(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4251,7 +4257,7 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4279,7 +4285,7 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
|
|||
name: "where on tag that should be double quoted but isn't",
|
||||
params: url.Values{"db": []string{"db0"}},
|
||||
command: `show series where data-center = 'foo'`,
|
||||
exp: `{"results":[{"error":"invalid expression: data - center = 'foo'"}]}`,
|
||||
exp: `{"error":"error parsing query: found DATA, expected identifier, string, number, bool at line 1, char 19"}`,
|
||||
},
|
||||
}...)
|
||||
|
||||
|
@ -4309,7 +4315,7 @@ func TestServer_Query_LimitAndOffset(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4426,7 +4432,7 @@ func TestServer_Query_Fill(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4525,7 +4531,7 @@ func TestServer_Query_Chunk(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4567,7 +4573,6 @@ func TestServer_Query_Chunk(t *testing.T) {
|
|||
t.Error(query.failureMessage())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
|
||||
|
@ -4578,13 +4583,13 @@ func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db1", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4738,7 +4743,7 @@ func TestServer_Query_ShowSeries(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4840,7 +4845,7 @@ func TestServer_Query_ShowMeasurements(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -4930,7 +4935,7 @@ func TestServer_Query_ShowTagKeys(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5050,7 +5055,7 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5117,7 +5122,7 @@ func TestServer_ContinuousQuery(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5251,7 +5256,7 @@ func TestServer_ContinuousQuery_Deadlock(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5324,7 +5329,7 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5368,7 +5373,7 @@ func TestServer_Query_OrderByTime(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5430,7 +5435,7 @@ func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5484,7 +5489,7 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5538,7 +5543,7 @@ func TestServer_Query_IntoTarget(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -5626,7 +5631,7 @@ func TestServer_WhereTimeInclusive(t *testing.T) {
|
|||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
|
|
@ -8,6 +8,14 @@
|
|||
# Change this option to true to disable reporting.
|
||||
reporting-disabled = false
|
||||
|
||||
# directory where the server ID and cluster meta server information will be kept
|
||||
dir = "/var/lib/influxdb"
|
||||
|
||||
# we'll try to get the hostname automatically, but if the OS returns something
|
||||
# that isn't resolvable by other servers in the cluster, use this option to
|
||||
# manually set the hostname
|
||||
# hostname = "localhost"
|
||||
|
||||
###
|
||||
### [meta]
|
||||
###
|
||||
|
@ -16,8 +24,12 @@ reporting-disabled = false
|
|||
###
|
||||
|
||||
[meta]
|
||||
# Controls if this node should run the metaservice and participate in the Raft group
|
||||
enabled = true
|
||||
|
||||
# Where the metadata/raft database is stored
|
||||
dir = "/var/lib/influxdb/meta"
|
||||
hostname = "localhost"
|
||||
|
||||
bind-address = ":8088"
|
||||
retention-autocreate = true
|
||||
election-timeout = "1s"
|
||||
|
@ -26,13 +38,6 @@ reporting-disabled = false
|
|||
commit-timeout = "50ms"
|
||||
cluster-tracing = false
|
||||
|
||||
# If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
|
||||
# the leader will automatically ask a non-raft peer node to promote to a raft
|
||||
# peer. This only happens if there is a non-raft peer node available to promote.
|
||||
# This setting only affects the local node, so to ensure it operates correctly, be sure to set
|
||||
# it in the config of every node.
|
||||
raft-promotion-enabled = true
|
||||
|
||||
###
|
||||
### [data]
|
||||
###
|
||||
|
@ -43,6 +48,9 @@ reporting-disabled = false
|
|||
###
|
||||
|
||||
[data]
|
||||
# Controls if this node holds time series data shards in the cluster
|
||||
enabled = true
|
||||
|
||||
dir = "/var/lib/influxdb/data"
|
||||
|
||||
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
|
||||
|
|
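The sample above now splits node roles into a [meta] service and a [data] store that can be toggled independently. A rough Go-side sketch of a config mirroring that layout (the function name is hypothetical; Meta.Enabled is assumed to mirror the `enabled` key above, while the other fields appear elsewhere in this change):

func newCombinedNodeConfig() *run.Config {
	c := NewConfig()      // test config constructor shown earlier in this diff
	c.Meta.Enabled = true // assumed field for the [meta] enabled key
	c.Meta.Dir = "/var/lib/influxdb/meta"
	c.Meta.BindAddress = ":8088"
	c.Data.Enabled = true
	c.Data.Dir = "/var/lib/influxdb/data"
	return c
}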
|
@ -1884,18 +1884,21 @@ func (s DropSeriesStatement) RequiredPrivileges() ExecutionPrivileges {
|
|||
type DropServerStatement struct {
|
||||
// ID of the node to be dropped.
|
||||
NodeID uint64
|
||||
// Force will force the server to drop even if it means losing data
|
||||
Force bool
|
||||
|
||||
// Meta indicates if the server being dropped is a meta or data node
|
||||
Meta bool
|
||||
}
|
||||
|
||||
// String returns a string representation of the drop server statement.
|
||||
func (s *DropServerStatement) String() string {
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.WriteString("DROP SERVER ")
|
||||
_, _ = buf.WriteString(strconv.FormatUint(s.NodeID, 10))
|
||||
if s.Force {
|
||||
_, _ = buf.WriteString(" FORCE")
|
||||
_, _ = buf.WriteString("DROP ")
|
||||
if s.Meta {
|
||||
_, _ = buf.WriteString(" META SERVER ")
|
||||
} else {
|
||||
_, _ = buf.WriteString(" DATA SERVER ")
|
||||
}
|
||||
_, _ = buf.WriteString(strconv.FormatUint(s.NodeID, 10))
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
|
|
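The statement now carries a Meta flag instead of the old FORCE modifier, and String() emits the META SERVER or DATA SERVER form accordingly. A small illustrative fragment (assumes the usual influxql and fmt imports; not tied to any particular file in this change):

// Dropping meta node 123 versus data node 123.
dropMeta := &influxql.DropServerStatement{NodeID: 123, Meta: true}
dropData := &influxql.DropServerStatement{NodeID: 123, Meta: false}
fmt.Println(dropMeta.String()) // renders the DROP META SERVER form
fmt.Println(dropData.String()) // renders the DROP DATA SERVER form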
|
@ -220,8 +220,8 @@ func (p *Parser) parseDropStatement() (Statement, error) {
|
|||
return p.parseDropRetentionPolicyStatement()
|
||||
} else if tok == USER {
|
||||
return p.parseDropUserStatement()
|
||||
} else if tok == SERVER {
|
||||
return p.parseDropServerStatement()
|
||||
} else if tok == META || tok == DATA {
|
||||
return p.parseDropServerStatement(tok)
|
||||
} else if tok == SUBSCRIPTION {
|
||||
return p.parseDropSubscriptionStatement()
|
||||
}
|
||||
|
@ -1308,23 +1308,25 @@ func (p *Parser) parseDropSeriesStatement() (*DropSeriesStatement, error) {
|
|||
}
|
||||
|
||||
// parseDropServerStatement parses a string and returns a DropServerStatement.
|
||||
// This function assumes the "DROP SERVER" tokens have already been consumed.
|
||||
func (p *Parser) parseDropServerStatement() (*DropServerStatement, error) {
|
||||
// This function assumes the "DROP <META|DATA>" tokens have already been consumed.
|
||||
func (p *Parser) parseDropServerStatement(tok Token) (*DropServerStatement, error) {
|
||||
// Parse the SERVER token
|
||||
if tok, pos, lit := p.scanIgnoreWhitespace(); tok != SERVER {
|
||||
return nil, newParseError(tokstr(tok, lit), []string{"SERVER"}, pos)
|
||||
}
|
||||
|
||||
s := &DropServerStatement{}
|
||||
var err error
|
||||
|
||||
if tok == META {
|
||||
s.Meta = true
|
||||
}
|
||||
|
||||
// Parse the server's ID.
|
||||
if s.NodeID, err = p.parseUInt64(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse optional FORCE token.
|
||||
if tok, pos, lit := p.scanIgnoreWhitespace(); tok == FORCE {
|
||||
s.Force = true
|
||||
} else if tok != EOF && tok != SEMICOLON {
|
||||
return nil, newParseError(tokstr(tok, lit), []string{"FORCE"}, pos)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
|
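For a quick sanity check of the new grammar, a parse round-trip might look like this (a sketch with a hypothetical test name; it assumes influxql.NewParser and ParseStatement as used by the existing parser tests, plus the strings and testing imports):

func TestParse_DropMetaServer_Sketch(t *testing.T) {
	stmt, err := influxql.NewParser(strings.NewReader(`DROP META SERVER 123`)).ParseStatement()
	if err != nil {
		t.Fatal(err)
	}
	// The META form should set Meta and carry the node ID.
	s, ok := stmt.(*influxql.DropServerStatement)
	if !ok || !s.Meta || s.NodeID != 123 {
		t.Fatalf("unexpected statement: %#v", stmt)
	}
}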
|
@ -1035,12 +1035,12 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
|
||||
// DROP SERVER statement
|
||||
{
|
||||
s: `DROP SERVER 123`,
|
||||
stmt: &influxql.DropServerStatement{NodeID: 123},
|
||||
s: `DROP META SERVER 123`,
|
||||
stmt: &influxql.DropServerStatement{NodeID: 123, Meta: true},
|
||||
},
|
||||
{
|
||||
s: `DROP SERVER 123 FORCE`,
|
||||
stmt: &influxql.DropServerStatement{NodeID: 123, Force: true},
|
||||
s: `DROP DATA SERVER 123`,
|
||||
stmt: &influxql.DropServerStatement{NodeID: 123, Meta: false},
|
||||
},
|
||||
|
||||
// SHOW CONTINUOUS QUERIES statement
|
||||
|
@ -1743,9 +1743,8 @@ func TestParser_ParseStatement(t *testing.T) {
|
|||
{s: `DROP SERIES`, err: `found EOF, expected FROM, WHERE at line 1, char 13`},
|
||||
{s: `DROP SERIES FROM`, err: `found EOF, expected identifier at line 1, char 18`},
|
||||
{s: `DROP SERIES FROM src WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},
|
||||
{s: `DROP SERVER`, err: `found EOF, expected number at line 1, char 13`},
|
||||
{s: `DROP SERVER abc`, err: `found abc, expected number at line 1, char 13`},
|
||||
{s: `DROP SERVER 1 1`, err: `found 1, expected FORCE at line 1, char 15`},
|
||||
{s: `DROP META SERVER`, err: `found EOF, expected number at line 1, char 18`},
|
||||
{s: `DROP DATA SERVER abc`, err: `found abc, expected number at line 1, char 18`},
|
||||
{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},
|
||||
{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},
|
||||
{s: `SHOW RETENTION ON`, err: `found ON, expected POLICIES at line 1, char 16`},
|
||||
|
|
|
@ -65,6 +65,7 @@ const (
|
|||
BY
|
||||
CREATE
|
||||
CONTINUOUS
|
||||
DATA
|
||||
DATABASE
|
||||
DATABASES
|
||||
DEFAULT
|
||||
|
@ -96,6 +97,7 @@ const (
|
|||
KEY
|
||||
KEYS
|
||||
LIMIT
|
||||
META
|
||||
MEASUREMENT
|
||||
MEASUREMENTS
|
||||
NAME
|
||||
|
@ -186,6 +188,7 @@ var tokens = [...]string{
|
|||
BY: "BY",
|
||||
CREATE: "CREATE",
|
||||
CONTINUOUS: "CONTINUOUS",
|
||||
DATA: "DATA",
|
||||
DATABASE: "DATABASE",
|
||||
DATABASES: "DATABASES",
|
||||
DEFAULT: "DEFAULT",
|
||||
|
@ -219,6 +222,7 @@ var tokens = [...]string{
|
|||
LIMIT: "LIMIT",
|
||||
MEASUREMENT: "MEASUREMENT",
|
||||
MEASUREMENTS: "MEASUREMENTS",
|
||||
META: "META",
|
||||
NAME: "NAME",
|
||||
NOT: "NOT",
|
||||
OFFSET: "OFFSET",
|
||||
|
|
|
@ -1,64 +0,0 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultHostname is the default hostname if one is not provided.
|
||||
DefaultHostname = "localhost"
|
||||
|
||||
// DefaultBindAddress is the default address to bind to.
|
||||
DefaultBindAddress = ":8088"
|
||||
|
||||
// DefaultHeartbeatTimeout is the default heartbeat timeout for the store.
|
||||
DefaultHeartbeatTimeout = 1000 * time.Millisecond
|
||||
|
||||
// DefaultElectionTimeout is the default election timeout for the store.
|
||||
DefaultElectionTimeout = 1000 * time.Millisecond
|
||||
|
||||
// DefaultLeaderLeaseTimeout is the default leader lease for the store.
|
||||
DefaultLeaderLeaseTimeout = 500 * time.Millisecond
|
||||
|
||||
// DefaultCommitTimeout is the default commit timeout for the store.
|
||||
DefaultCommitTimeout = 50 * time.Millisecond
|
||||
|
||||
// DefaultRaftPromotionEnabled is the default for auto promoting a node to a raft node when needed
|
||||
DefaultRaftPromotionEnabled = true
|
||||
|
||||
// DefaultLoggingEnabled determines if log messages are printed for the meta service
|
||||
DefaultLoggingEnabled = true
|
||||
)
|
||||
|
||||
// Config represents the meta configuration.
|
||||
type Config struct {
|
||||
Dir string `toml:"dir"`
|
||||
Hostname string `toml:"hostname"`
|
||||
BindAddress string `toml:"bind-address"`
|
||||
Peers []string `toml:"-"`
|
||||
RetentionAutoCreate bool `toml:"retention-autocreate"`
|
||||
ElectionTimeout toml.Duration `toml:"election-timeout"`
|
||||
HeartbeatTimeout toml.Duration `toml:"heartbeat-timeout"`
|
||||
LeaderLeaseTimeout toml.Duration `toml:"leader-lease-timeout"`
|
||||
CommitTimeout toml.Duration `toml:"commit-timeout"`
|
||||
ClusterTracing bool `toml:"cluster-tracing"`
|
||||
RaftPromotionEnabled bool `toml:"raft-promotion-enabled"`
|
||||
LoggingEnabled bool `toml:"logging-enabled"`
|
||||
}
|
||||
|
||||
// NewConfig builds a new configuration with default values.
|
||||
func NewConfig() *Config {
|
||||
return &Config{
|
||||
Hostname: DefaultHostname,
|
||||
BindAddress: DefaultBindAddress,
|
||||
RetentionAutoCreate: true,
|
||||
ElectionTimeout: toml.Duration(DefaultElectionTimeout),
|
||||
HeartbeatTimeout: toml.Duration(DefaultHeartbeatTimeout),
|
||||
LeaderLeaseTimeout: toml.Duration(DefaultLeaderLeaseTimeout),
|
||||
CommitTimeout: toml.Duration(DefaultCommitTimeout),
|
||||
RaftPromotionEnabled: DefaultRaftPromotionEnabled,
|
||||
LoggingEnabled: DefaultLoggingEnabled,
|
||||
}
|
||||
}
|
|
@ -1,824 +0,0 @@
|
|||
package meta_test
|
||||
|
||||
// import "github.com/davecgh/go-spew/spew"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/meta/internal"
|
||||
)
|
||||
|
||||
// Ensure a node can be created.
|
||||
func TestData_CreateNode(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("host0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Nodes, []meta.NodeInfo{{ID: 1, Host: "host0"}}) {
|
||||
t.Fatalf("unexpected node: %#v", data.Nodes[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a node can be removed.
|
||||
func TestData_DeleteNode_Basic(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("host0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateNode("host1"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateNode("host2"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DeleteNode(1, false); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(data.Nodes) != 2 {
|
||||
t.Fatalf("unexpected node count: %d", len(data.Nodes))
|
||||
} else if data.Nodes[0] != (meta.NodeInfo{ID: 2, Host: "host1"}) {
|
||||
t.Fatalf("unexpected node: %#v", data.Nodes[0])
|
||||
} else if data.Nodes[1] != (meta.NodeInfo{ID: 3, Host: "host2"}) {
|
||||
t.Fatalf("unexpected node: %#v", data.Nodes[1])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a node can be removed with shard info in play
|
||||
func TestData_DeleteNode_Shards(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("host0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateNode("host1"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateNode("host2"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateNode("host3"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.CreateDatabase("mydb"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rpi := &meta.RetentionPolicyInfo{
|
||||
Name: "myrp",
|
||||
ReplicaN: 3,
|
||||
}
|
||||
if err := data.CreateRetentionPolicy("mydb", rpi); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := data.CreateShardGroup("mydb", "myrp", time.Now()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners) != 3 {
|
||||
t.Fatal("wrong number of shard owners")
|
||||
}
|
||||
if err := data.DeleteNode(2, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, exp := len(data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners), 2; exp != got {
|
||||
t.Fatalf("wrong number of shard owners, got %d, exp %d", got, exp)
|
||||
}
|
||||
for _, s := range data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards {
|
||||
if s.OwnedBy(2) {
|
||||
t.Fatal("shard still owned by delted node")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a database can be created.
|
||||
func TestData_CreateDatabase(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}}) {
|
||||
t.Fatalf("unexpected databases: %#v", data.Databases)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a database without a name returns an error.
|
||||
func TestData_CreateDatabase_ErrNameRequired(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase(""); err != meta.ErrDatabaseNameRequired {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating an already existing database returns an error.
|
||||
func TestData_CreateDatabase_ErrDatabaseExists(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := data.CreateDatabase("db0"); err != meta.ErrDatabaseExists {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a database can be removed.
|
||||
func TestData_DropDatabase(t *testing.T) {
|
||||
var data meta.Data
|
||||
for i := 0; i < 3; i++ {
|
||||
if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := data.DropDatabase("db1"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}, {Name: "db2"}}) {
|
||||
t.Fatalf("unexpected databases: %#v", data.Databases)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a retention policy can be created.
|
||||
func TestData_CreateRetentionPolicy(t *testing.T) {
|
||||
data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}, {ID: 2}}}
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create policy.
|
||||
if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{
|
||||
Name: "rp0",
|
||||
ReplicaN: 2,
|
||||
Duration: 4 * time.Hour,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify policy exists.
|
||||
if !reflect.DeepEqual(data.Databases[0].RetentionPolicies, []meta.RetentionPolicyInfo{
|
||||
{
|
||||
Name: "rp0",
|
||||
ReplicaN: 2,
|
||||
Duration: 4 * time.Hour,
|
||||
ShardGroupDuration: 1 * time.Hour,
|
||||
},
|
||||
}) {
|
||||
t.Fatalf("unexpected policies: %#v", data.Databases[0].RetentionPolicies)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a policy without a name returns an error.
|
||||
func TestData_CreateRetentionPolicy_ErrNameRequired(t *testing.T) {
|
||||
data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}}}
|
||||
if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: ""}); err != meta.ErrRetentionPolicyNameRequired {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a policy with a replication factor less than 1 returns an error.
|
||||
func TestData_CreateRetentionPolicy_ErrReplicationFactorTooLow(t *testing.T) {
|
||||
data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}}}
|
||||
if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 0}); err != meta.ErrReplicationFactorTooLow {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a retention policy on a non-existent database returns an error.
|
||||
func TestData_CreateRetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
|
||||
data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}}}
|
||||
expErr := influxdb.ErrDatabaseNotFound("db0")
|
||||
if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err.Error() != expErr.Error() {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating an already existing policy returns an error.
|
||||
func TestData_CreateRetentionPolicy_ErrRetentionPolicyExists(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != meta.ErrRetentionPolicyExists {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a retention policy can be updated.
|
||||
func TestData_UpdateRetentionPolicy(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Update the policy.
|
||||
var rpu meta.RetentionPolicyUpdate
|
||||
rpu.SetName("rp1")
|
||||
rpu.SetDuration(10 * time.Hour)
|
||||
rpu.SetReplicaN(3)
|
||||
if err := data.UpdateRetentionPolicy("db0", "rp0", &rpu); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify the policy was changed.
|
||||
if rpi, _ := data.RetentionPolicy("db0", "rp1"); !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{
|
||||
Name: "rp1",
|
||||
Duration: 10 * time.Hour,
|
||||
ShardGroupDuration: 3600000000000,
|
||||
ReplicaN: 3,
|
||||
}) {
|
||||
t.Fatalf("unexpected policy: %#v", rpi)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a retention policy can be removed.
|
||||
func TestData_DropRetentionPolicy(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DropRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(data.Databases[0].RetentionPolicies) != 0 {
|
||||
t.Fatalf("unexpected policies: %#v", data.Databases[0].RetentionPolicies)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure an error is returned when deleting a policy from a non-existent database.
|
||||
func TestData_DropRetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
|
||||
var data meta.Data
|
||||
expErr := influxdb.ErrDatabaseNotFound("db0")
|
||||
if err := data.DropRetentionPolicy("db0", "rp0"); err.Error() != expErr.Error() {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure an error is returned when deleting a non-existent policy.
|
||||
func TestData_DropRetentionPolicy_ErrRetentionPolicyNotFound(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expErr := influxdb.ErrRetentionPolicyNotFound("rp0")
|
||||
if err := data.DropRetentionPolicy("db0", "rp0"); err.Error() != expErr.Error() {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a retention policy can be retrieved.
|
||||
func TestData_RetentionPolicy(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp1", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if rpi, err := data.RetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{
|
||||
Name: "rp0",
|
||||
ShardGroupDuration: 604800000000000,
|
||||
ReplicaN: 1,
|
||||
}) {
|
||||
t.Fatalf("unexpected value: %#v", rpi)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that retrieving a policy from a non-existent database returns an error.
|
||||
func TestData_RetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
|
||||
var data meta.Data
|
||||
expErr := influxdb.ErrDatabaseNotFound("db0")
|
||||
if _, err := data.RetentionPolicy("db0", "rp0"); err.Error() != expErr.Error() {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a default retention policy can be set.
|
||||
func TestData_SetDefaultRetentionPolicy(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify there is no default policy on the database initially.
|
||||
if name := data.Database("db0").DefaultRetentionPolicy; name != "" {
|
||||
t.Fatalf("unexpected initial default retention policy: %s", name)
|
||||
}
|
||||
|
||||
// Set the default policy.
|
||||
if err := data.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify the default policy is now set.
|
||||
if name := data.Database("db0").DefaultRetentionPolicy; name != "rp0" {
|
||||
t.Fatalf("unexpected default retention policy: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a shard group can be created on a database for a given timestamp.
|
||||
func TestData_CreateShardGroup(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("node0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateNode("node1"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create shard group.
|
||||
if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify the shard group was created.
|
||||
if sgi, _ := data.ShardGroupByTimestamp("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); !reflect.DeepEqual(sgi, &meta.ShardGroupInfo{
|
||||
ID: 1,
|
||||
StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
EndTime: time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC),
|
||||
Shards: []meta.ShardInfo{
|
||||
{
|
||||
ID: 1,
|
||||
Owners: []meta.ShardOwner{
|
||||
{NodeID: 1},
|
||||
{NodeID: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}) {
|
||||
t.Fatalf("unexpected shard group: %#v", sgi)
|
||||
} else if !sgi.Shards[0].OwnedBy(1) || !sgi.Shards[0].OwnedBy(2) || sgi.Shards[0].OwnedBy(3) {
|
||||
// Verify the shard is owned by the correct nodes.
|
||||
t.Fatalf("new shard is not owned by correct node")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a shard group is correctly detected as expired.
|
||||
func TestData_ShardGroupExpiredDeleted(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("node0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateNode("node1"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create shard groups.
|
||||
if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := data.CreateShardGroup("db0", "rp0", time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check expiration.
|
||||
rp, _ := data.RetentionPolicy("db0", "rp0")
|
||||
groups := rp.ExpiredShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC))
|
||||
if len(groups) != 1 {
|
||||
t.Fatalf("wrong number of expired shard groups returned, got %d, exp 1", len(groups))
|
||||
}
|
||||
if groups[0].StartTime != time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) {
|
||||
t.Fatal("wrong shard group marked as expired")
|
||||
}
|
||||
|
||||
// Check deletion.
|
||||
if err := data.DeleteShardGroup("db0", "rp0", groups[0].ID); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
groups = rp.DeletedShardGroups()
|
||||
if len(groups) != 1 {
|
||||
t.Fatalf("wrong number of deleted shard groups returned, got %d, exp 1", len(groups))
|
||||
}
|
||||
if groups[0].StartTime != time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) {
|
||||
t.Fatal("wrong shard group marked as expired")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Test shard group selection.
|
||||
func TestShardGroup_Overlaps(t *testing.T) {
|
||||
// Make a shard group 1 hour in duration
|
||||
startTime, _ := time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
|
||||
endTime := startTime.Add(time.Hour)
|
||||
g := &meta.ShardGroupInfo{StartTime: startTime, EndTime: endTime}
|
||||
|
||||
if !g.Overlaps(g.StartTime.Add(-time.Minute), g.EndTime) {
|
||||
t.Fatal("shard group not selected when min before start time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime.Add(-time.Minute), g.StartTime) {
|
||||
t.Fatal("shard group not selected when min before start time and max equals start time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime, g.EndTime.Add(time.Minute)) {
|
||||
t.Fatal("shard group not selected when max after after end time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime.Add(-time.Minute), g.EndTime.Add(time.Minute)) {
|
||||
t.Fatal("shard group not selected when min before start time and when max after end time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime.Add(time.Minute), g.EndTime.Add(-time.Minute)) {
|
||||
t.Fatal("shard group not selected when min after start time and when max before end time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime, g.EndTime) {
|
||||
t.Fatal("shard group not selected when min at start time and when max at end time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime, g.StartTime) {
|
||||
t.Fatal("shard group not selected when min and max set to start time")
|
||||
}
|
||||
|
||||
if !g.Overlaps(g.StartTime.Add(1*time.Minute), g.EndTime.Add(24*time.Hour)) {
|
||||
t.Fatal("shard group selected when both min in range")
|
||||
}
|
||||
|
||||
if g.Overlaps(g.EndTime, g.EndTime) {
|
||||
t.Fatal("shard group selected when min and max set to end time")
|
||||
}
|
||||
|
||||
if g.Overlaps(g.StartTime.Add(-10*time.Hour), g.EndTime.Add(-9*time.Hour)) {
|
||||
t.Fatal("shard group selected when both min and max before shard times")
|
||||
}
|
||||
|
||||
if g.Overlaps(g.StartTime.Add(24*time.Hour), g.EndTime.Add(25*time.Hour)) {
|
||||
t.Fatal("shard group selected when both min and max after shard times")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Ensure a shard group can be removed by ID.
|
||||
func TestData_DeleteShardGroup(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateNode("node0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DeleteShardGroup("db0", "rp0", 1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if sg := data.Databases[0].RetentionPolicies[0].ShardGroups[0]; !sg.Deleted() {
|
||||
t.Fatalf("shard group not correctly flagged as deleted")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a continuous query can be created.
|
||||
func TestData_CreateContinuousQuery(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateContinuousQuery("db0", "cq0", "SELECT count() FROM foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases[0].ContinuousQueries, []meta.ContinuousQueryInfo{
|
||||
{Name: "cq0", Query: "SELECT count() FROM foo"},
|
||||
}) {
|
||||
t.Fatalf("unexpected queries: %#v", data.Databases[0].ContinuousQueries)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a continuous query can be removed.
|
||||
func TestData_DropContinuousQuery(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateContinuousQuery("db0", "cq0", "SELECT count() FROM foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err = data.CreateContinuousQuery("db0", "cq1", "SELECT count() FROM bar"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DropContinuousQuery("db0", "cq0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases[0].ContinuousQueries, []meta.ContinuousQueryInfo{
|
||||
{Name: "cq1", Query: "SELECT count() FROM bar"},
|
||||
}) {
|
||||
t.Fatalf("unexpected queries: %#v", data.Databases[0].ContinuousQueries)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a subscription can be created.
|
||||
func TestData_CreateSubscription(t *testing.T) {
|
||||
var data meta.Data
|
||||
rpi := &meta.RetentionPolicyInfo{
|
||||
Name: "rp0",
|
||||
ReplicaN: 3,
|
||||
}
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateRetentionPolicy("db0", rpi); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases[0].RetentionPolicies[0].Subscriptions, []meta.SubscriptionInfo{
|
||||
{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
|
||||
}) {
|
||||
t.Fatalf("unexpected subscriptions: %#v", data.Databases[0].RetentionPolicies[0].Subscriptions)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a subscription can be removed.
|
||||
func TestData_DropSubscription(t *testing.T) {
|
||||
var data meta.Data
|
||||
rpi := &meta.RetentionPolicyInfo{
|
||||
Name: "rp0",
|
||||
ReplicaN: 3,
|
||||
}
|
||||
if err := data.CreateDatabase("db0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateRetentionPolicy("db0", rpi); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateSubscription("db0", "rp0", "s1", "ALL", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DropSubscription("db0", "rp0", "s0"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Databases[0].RetentionPolicies[0].Subscriptions, []meta.SubscriptionInfo{
|
||||
{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
|
||||
}) {
|
||||
t.Fatalf("unexpected subscriptions: %#v", data.Databases[0].RetentionPolicies[0].Subscriptions)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a user can be created.
|
||||
func TestData_CreateUser(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateUser("susy", "ABC123", true); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Users, []meta.UserInfo{
|
||||
{Name: "susy", Hash: "ABC123", Admin: true},
|
||||
}) {
|
||||
t.Fatalf("unexpected users: %#v", data.Users)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating a user with no username returns an error.
|
||||
func TestData_CreateUser_ErrUsernameRequired(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateUser("", "", false); err != meta.ErrUsernameRequired {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that creating the same user twice returns an error.
|
||||
func TestData_CreateUser_ErrUserExists(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateUser("susy", "", false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := data.CreateUser("susy", "", false); err != meta.ErrUserExists {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a user can be removed.
|
||||
func TestData_DropUser(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateUser("susy", "", false); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateUser("bob", "", false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := data.DropUser("bob"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.Users, []meta.UserInfo{
|
||||
{Name: "susy"},
|
||||
}) {
|
||||
t.Fatalf("unexpected users: %#v", data.Users)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that removing a non-existent user returns an error.
|
||||
func TestData_DropUser_ErrUserNotFound(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.DropUser("bob"); err != meta.ErrUserNotFound {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a user can be updated.
|
||||
func TestData_UpdateUser(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.CreateUser("susy", "", false); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := data.CreateUser("bob", "", false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Update password hash.
|
||||
if err := data.UpdateUser("bob", "XXX"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !reflect.DeepEqual(data.User("bob"), &meta.UserInfo{Name: "bob", Hash: "XXX"}) {
|
||||
t.Fatalf("unexpected user: %#v", data.User("bob"))
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that updating a non-existent user returns an error.
|
||||
func TestData_UpdateUser_ErrUserNotFound(t *testing.T) {
|
||||
var data meta.Data
|
||||
if err := data.UpdateUser("bob", "ZZZ"); err != meta.ErrUserNotFound {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the data can be deeply copied.
|
||||
func TestData_Clone(t *testing.T) {
|
||||
data := meta.Data{
|
||||
Term: 10,
|
||||
Index: 20,
|
||||
Nodes: []meta.NodeInfo{
|
||||
{ID: 1, Host: "host0"},
|
||||
{ID: 2, Host: "host1"},
|
||||
},
|
||||
Databases: []meta.DatabaseInfo{
|
||||
{
|
||||
Name: "db0",
|
||||
DefaultRetentionPolicy: "default",
|
||||
RetentionPolicies: []meta.RetentionPolicyInfo{
|
||||
{
|
||||
Name: "rp0",
|
||||
ReplicaN: 3,
|
||||
Duration: 10 * time.Second,
|
||||
ShardGroupDuration: 3 * time.Millisecond,
|
||||
ShardGroups: []meta.ShardGroupInfo{
|
||||
{
|
||||
ID: 100,
|
||||
StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC),
|
||||
Shards: []meta.ShardInfo{
|
||||
{
|
||||
ID: 200,
|
||||
Owners: []meta.ShardOwner{
|
||||
{NodeID: 1},
|
||||
{NodeID: 3},
|
||||
{NodeID: 4},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ContinuousQueries: []meta.ContinuousQueryInfo{
|
||||
{Query: "SELECT count() FROM foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Users: []meta.UserInfo{
|
||||
{
|
||||
Name: "susy",
|
||||
Hash: "ABC123",
|
||||
Admin: true,
|
||||
Privileges: map[string]influxql.Privilege{"db0": influxql.AllPrivileges},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Copy the root structure.
|
||||
other := data.Clone()
|
||||
|
||||
if !reflect.DeepEqual(data.Nodes, other.Nodes) {
|
||||
t.Fatalf("unexpected cloned nodes: %#v", other.Nodes)
|
||||
} else if !reflect.DeepEqual(data.Databases, other.Databases) {
|
||||
t.Fatalf("unexpected cloned databases: %#v", other.Databases)
|
||||
} else if !reflect.DeepEqual(data.Users, other.Users) {
|
||||
t.Fatalf("unexpected cloned users: %#v", other.Users)
|
||||
}
|
||||
|
||||
// Ensure that changing data in the clone does not affect the original.
|
||||
other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID = 9
|
||||
if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID; v != 3 {
|
||||
t.Fatalf("editing clone changed original: %v", v)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the data can be marshaled and unmarshaled.
|
||||
func TestData_MarshalBinary(t *testing.T) {
|
||||
data := meta.Data{
|
||||
Term: 10,
|
||||
Index: 20,
|
||||
Nodes: []meta.NodeInfo{
|
||||
{ID: 1, Host: "host0"},
|
||||
{ID: 2, Host: "host1"},
|
||||
},
|
||||
Databases: []meta.DatabaseInfo{
|
||||
{
|
||||
Name: "db0",
|
||||
DefaultRetentionPolicy: "default",
|
||||
RetentionPolicies: []meta.RetentionPolicyInfo{
|
||||
{
|
||||
Name: "rp0",
|
||||
ReplicaN: 3,
|
||||
Duration: 10 * time.Second,
|
||||
ShardGroupDuration: 3 * time.Millisecond,
|
||||
ShardGroups: []meta.ShardGroupInfo{
|
||||
{
|
||||
ID: 100,
|
||||
StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC),
|
||||
Shards: []meta.ShardInfo{
|
||||
{
|
||||
ID: 200,
|
||||
Owners: []meta.ShardOwner{
|
||||
{NodeID: 1},
|
||||
{NodeID: 3},
|
||||
{NodeID: 4},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ContinuousQueries: []meta.ContinuousQueryInfo{
|
||||
{Query: "SELECT count() FROM foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Users: []meta.UserInfo{
|
||||
{
|
||||
Name: "susy",
|
||||
Hash: "ABC123",
|
||||
Admin: true,
|
||||
Privileges: map[string]influxql.Privilege{"db0": influxql.AllPrivileges},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Marshal the data structure.
|
||||
buf, err := data.MarshalBinary()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Unmarshal into new data.
|
||||
var other meta.Data
|
||||
if err := other.UnmarshalBinary(buf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(data.Nodes, other.Nodes) {
|
||||
t.Fatalf("unexpected nodes: %#v", other.Nodes)
|
||||
} else if !reflect.DeepEqual(data.Databases, other.Databases) {
|
||||
spew.Dump(data.Databases)
|
||||
spew.Dump(other.Databases)
|
||||
t.Fatalf("unexpected databases: %#v", other.Databases)
|
||||
} else if !reflect.DeepEqual(data.Users, other.Users) {
|
||||
t.Fatalf("unexpected users: %#v", other.Users)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure shards with deprecated "OwnerIDs" can be decoded.
|
||||
func TestShardInfo_UnmarshalBinary_OwnerIDs(t *testing.T) {
|
||||
// Encode deprecated form to bytes.
|
||||
buf, err := proto.Marshal(&internal.ShardInfo{
|
||||
ID: proto.Uint64(1),
|
||||
OwnerIDs: []uint64{10, 20, 30},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Decode deprecated form.
|
||||
var si meta.ShardInfo
|
||||
if err := si.UnmarshalBinary(buf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify data is migrated correctly.
|
||||
if !reflect.DeepEqual(si, meta.ShardInfo{
|
||||
ID: 1,
|
||||
Owners: []meta.ShardOwner{
|
||||
{NodeID: 10},
|
||||
{NodeID: 20},
|
||||
{NodeID: 30},
|
||||
},
|
||||
}) {
|
||||
t.Fatalf("unexpected shard info: %s", spew.Sdump(si))
|
||||
}
|
||||
}
|
|
@ -1,62 +0,0 @@
package meta

import (
    "io"
    "net"
)

// proxy brokers a connection from src to dst
func proxy(dst, src *net.TCPConn) error {
    // channels to wait on the close event for each connection
    serverClosed := make(chan struct{}, 1)
    clientClosed := make(chan struct{}, 1)
    errors := make(chan error, 2)

    go broker(dst, src, clientClosed, errors)
    go broker(src, dst, serverClosed, errors)

    // wait for one half of the proxy to exit, then trigger a shutdown of the
    // other half by calling CloseRead(). This will break the read loop in the
    // broker and allow us to fully close the connection cleanly without a
    // "use of closed network connection" error.
    var waitFor chan struct{}
    select {
    case <-clientClosed:
        // the client closed first and any more packets from the server aren't
        // useful, so we can optionally SetLinger(0) here to recycle the port
        // faster.
        dst.SetLinger(0)
        dst.Close()
        waitFor = serverClosed
    case <-serverClosed:
        src.Close()
        waitFor = clientClosed
    case err := <-errors:
        src.Close()
        dst.SetLinger(0)
        dst.Close()
        return err
    }

    // Wait for the other connection to close.
    <-waitFor
    return nil
}

// This does the actual data transfer.
// The broker only closes the Read side.
func broker(dst, src net.Conn, srcClosed chan struct{}, errors chan error) {
    // We can handle errors in a finer-grained manner by inlining io.Copy (it's
    // simple, and we drop the ReaderFrom or WriterTo checks for
    // net.Conn->net.Conn transfers, which aren't needed). This would also let
    // us adjust buffersize.
    _, err := io.Copy(dst, src)

    if err != nil {
        errors <- err
    }
    if err := src.Close(); err != nil {
        errors <- err
    }
    srcClosed <- struct{}{}
}
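Not part of the change above, but for orientation: a minimal sketch of how a helper like proxy is typically driven, accepting one client connection and forwarding it to another TCP address. The forwardOnce name, the listener, and the target address are illustrative assumptions only, not code from this repository.

package meta

import "net"

// forwardOnce is a hypothetical helper: it accepts one TCP connection on ln
// and proxies it to addr using the proxy function defined above.
func forwardOnce(ln *net.TCPListener, addr string) error {
    src, err := ln.AcceptTCP()
    if err != nil {
        return err
    }
    defer src.Close()

    raddr, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil {
        return err
    }

    dst, err := net.DialTCP("tcp", nil, raddr)
    if err != nil {
        return err
    }
    defer dst.Close()

    // proxy blocks until both halves of the connection have closed.
    return proxy(dst, src)
}

proxyLeader in meta/rpc.go below uses the same helper to forward RPC connections that arrive at a non-leader node on to the current raft leader.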
meta/rpc.go (536 lines)
|
@ -1,536 +0,0 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/influxdb/influxdb/meta/internal"
|
||||
)
|
||||
|
||||
// Max size of a message before we treat the size as invalid
|
||||
const (
|
||||
MaxMessageSize = 1024 * 1024 * 1024
|
||||
leaderDialTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// rpc handles request/response style messaging between cluster nodes
|
||||
type rpc struct {
|
||||
logger *log.Logger
|
||||
tracingEnabled bool
|
||||
|
||||
store interface {
|
||||
cachedData() *Data
|
||||
enableLocalRaft() error
|
||||
IsLeader() bool
|
||||
Leader() string
|
||||
Peers() ([]string, error)
|
||||
SetPeers(addrs []string) error
|
||||
AddPeer(host string) error
|
||||
CreateNode(host string) (*NodeInfo, error)
|
||||
NodeByHost(host string) (*NodeInfo, error)
|
||||
WaitForDataChanged() error
|
||||
}
|
||||
}
|
||||
|
||||
// JoinResult defines the join result structure.
|
||||
type JoinResult struct {
|
||||
RaftEnabled bool
|
||||
RaftNodes []string
|
||||
NodeID uint64
|
||||
}
|
||||
|
||||
// Reply defines the interface for Reply objects.
|
||||
type Reply interface {
|
||||
GetHeader() *internal.ResponseHeader
|
||||
}
|
||||
|
||||
// proxyLeader proxies the connection to the current raft leader
|
||||
func (r *rpc) proxyLeader(conn *net.TCPConn, buf []byte) {
|
||||
if r.store.Leader() == "" {
|
||||
r.sendError(conn, "no leader detected during proxyLeader")
|
||||
return
|
||||
}
|
||||
|
||||
leaderConn, err := net.DialTimeout("tcp", r.store.Leader(), leaderDialTimeout)
|
||||
if err != nil {
|
||||
r.sendError(conn, fmt.Sprintf("dial leader: %v", err))
|
||||
return
|
||||
}
|
||||
defer leaderConn.Close()
|
||||
|
||||
leaderConn.Write([]byte{MuxRPCHeader})
|
||||
// re-write the original message to the leader
|
||||
leaderConn.Write(buf)
|
||||
if err := proxy(leaderConn.(*net.TCPConn), conn); err != nil {
|
||||
r.sendError(conn, fmt.Sprintf("leader proxy error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// handleRPCConn reads a command from the connection and executes it.
|
||||
func (r *rpc) handleRPCConn(conn net.Conn) {
|
||||
defer conn.Close()
|
||||
// RPC connections should execute on the leader. If we are not the leader,
|
||||
// proxy the connection to the leader so that clients can connect to any node
|
||||
// in the cluster.
|
||||
r.traceCluster("rpc connection from: %v", conn.RemoteAddr())
|
||||
|
||||
// Read and execute request.
|
||||
typ, buf, err := r.readMessage(conn)
|
||||
// Handle unexpected RPC errors
|
||||
if err != nil {
|
||||
r.sendError(conn, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !r.store.IsLeader() && typ != internal.RPCType_PromoteRaft {
|
||||
r.proxyLeader(conn.(*net.TCPConn), pack(typ, buf))
|
||||
return
|
||||
}
|
||||
|
||||
typ, resp, err := r.executeMessage(conn, typ, buf)
|
||||
|
||||
// Handle unexpected RPC errors
|
||||
if err != nil {
|
||||
r.sendError(conn, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Set the status header and error message
|
||||
if reply, ok := resp.(Reply); ok {
|
||||
reply.GetHeader().OK = proto.Bool(err == nil)
|
||||
if err != nil {
|
||||
reply.GetHeader().Error = proto.String(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
r.sendResponse(conn, typ, resp)
|
||||
}
|
||||
|
||||
func (r *rpc) readMessage(conn net.Conn) (internal.RPCType, []byte, error) {
|
||||
// Read request size.
|
||||
var sz uint64
|
||||
if err := binary.Read(conn, binary.BigEndian, &sz); err != nil {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("read size: %s", err)
|
||||
}
|
||||
|
||||
if sz == 0 {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("invalid message size: %d", sz)
|
||||
}
|
||||
|
||||
if sz >= MaxMessageSize {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz)
|
||||
}
|
||||
|
||||
// Read request.
|
||||
buf := make([]byte, sz)
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("read request: %s", err)
|
||||
}
|
||||
|
||||
// Determine the RPC type
|
||||
rpcType := internal.RPCType(btou64(buf[0:8]))
|
||||
buf = buf[8:]
|
||||
|
||||
r.traceCluster("recv %v request on: %v", rpcType, conn.RemoteAddr())
|
||||
return rpcType, buf, nil
|
||||
}
|
||||
|
||||
func (r *rpc) executeMessage(conn net.Conn, rpcType internal.RPCType, buf []byte) (internal.RPCType, proto.Message, error) {
|
||||
switch rpcType {
|
||||
case internal.RPCType_FetchData:
|
||||
var req internal.FetchDataRequest
|
||||
if err := proto.Unmarshal(buf, &req); err != nil {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("fetch request unmarshal: %v", err)
|
||||
}
|
||||
resp, err := r.handleFetchData(&req)
|
||||
return rpcType, resp, err
|
||||
case internal.RPCType_Join:
|
||||
var req internal.JoinRequest
|
||||
if err := proto.Unmarshal(buf, &req); err != nil {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("join request unmarshal: %v", err)
|
||||
}
|
||||
resp, err := r.handleJoinRequest(&req)
|
||||
return rpcType, resp, err
|
||||
case internal.RPCType_PromoteRaft:
|
||||
var req internal.PromoteRaftRequest
|
||||
if err := proto.Unmarshal(buf, &req); err != nil {
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("promote to raft request unmarshal: %v", err)
|
||||
}
|
||||
resp, err := r.handlePromoteRaftRequest(&req)
|
||||
return rpcType, resp, err
|
||||
default:
|
||||
return internal.RPCType_Error, nil, fmt.Errorf("unknown rpc type:%v", rpcType)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rpc) sendResponse(conn net.Conn, typ internal.RPCType, resp proto.Message) {
|
||||
// Marshal the response back to a protobuf
|
||||
buf, err := proto.Marshal(resp)
|
||||
if err != nil {
|
||||
r.logger.Printf("unable to marshal response: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Encode response back to connection.
|
||||
if _, err := conn.Write(pack(typ, buf)); err != nil {
|
||||
r.logger.Printf("unable to write rpc response: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rpc) sendError(conn net.Conn, msg string) {
|
||||
r.traceCluster(msg)
|
||||
resp := &internal.ErrorResponse{
|
||||
Header: &internal.ResponseHeader{
|
||||
OK: proto.Bool(false),
|
||||
Error: proto.String(msg),
|
||||
},
|
||||
}
|
||||
|
||||
r.sendResponse(conn, internal.RPCType_Error, resp)
|
||||
}
|
||||
|
||||
// handleFetchData handles a request for the current nodes meta data
|
||||
func (r *rpc) handleFetchData(req *internal.FetchDataRequest) (*internal.FetchDataResponse, error) {
|
||||
var (
|
||||
b []byte
|
||||
data *Data
|
||||
err error
|
||||
)
|
||||
|
||||
for {
|
||||
data = r.store.cachedData()
|
||||
if data.Index != req.GetIndex() {
|
||||
b, err = data.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if !req.GetBlocking() {
|
||||
break
|
||||
}
|
||||
|
||||
if err := r.store.WaitForDataChanged(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &internal.FetchDataResponse{
|
||||
Header: &internal.ResponseHeader{
|
||||
OK: proto.Bool(true),
|
||||
},
|
||||
Index: proto.Uint64(data.Index),
|
||||
Term: proto.Uint64(data.Term),
|
||||
Data: b}, nil
|
||||
}
|
||||
|
||||
// handleJoinRequest handles a request to join the cluster
|
||||
func (r *rpc) handleJoinRequest(req *internal.JoinRequest) (*internal.JoinResponse, error) {
|
||||
r.traceCluster("join request from: %v", *req.Addr)
|
||||
|
||||
node, err := func() (*NodeInfo, error) {
|
||||
|
||||
// attempt to create the node
|
||||
node, err := r.store.CreateNode(*req.Addr)
|
||||
// if it exists, return the existing node
|
||||
if err == ErrNodeExists {
|
||||
node, err = r.store.NodeByHost(*req.Addr)
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
r.logger.Printf("existing node re-joined: id=%v addr=%v", node.ID, node.Host)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("create node: %v", err)
|
||||
}
|
||||
|
||||
peers, err := r.store.Peers()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list peers: %v", err)
|
||||
}
|
||||
|
||||
// If we have less than 3 nodes, add them as raft peers if they are not
|
||||
// already a peer
|
||||
if len(peers) < MaxRaftNodes && !raft.PeerContained(peers, *req.Addr) {
|
||||
r.logger.Printf("adding new raft peer: nodeId=%v addr=%v", node.ID, *req.Addr)
|
||||
if err = r.store.AddPeer(*req.Addr); err != nil {
|
||||
return node, fmt.Errorf("add peer: %v", err)
|
||||
}
|
||||
}
|
||||
return node, err
|
||||
}()
|
||||
|
||||
nodeID := uint64(0)
|
||||
if node != nil {
|
||||
nodeID = node.ID
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get the current raft peers
|
||||
peers, err := r.store.Peers()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list peers: %v", err)
|
||||
}
|
||||
|
||||
return &internal.JoinResponse{
|
||||
Header: &internal.ResponseHeader{
|
||||
OK: proto.Bool(true),
|
||||
},
|
||||
EnableRaft: proto.Bool(raft.PeerContained(peers, *req.Addr)),
|
||||
RaftNodes: peers,
|
||||
NodeID: proto.Uint64(nodeID),
|
||||
}, err
|
||||
}
|
||||
|
||||
func (r *rpc) handlePromoteRaftRequest(req *internal.PromoteRaftRequest) (*internal.PromoteRaftResponse, error) {
|
||||
r.traceCluster("promote raft request from: %v", *req.Addr)
|
||||
|
||||
// Need to set the local store peers to match what we are about to join
|
||||
if err := r.store.SetPeers(req.RaftNodes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := r.store.enableLocalRaft(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !contains(req.RaftNodes, *req.Addr) {
|
||||
req.RaftNodes = append(req.RaftNodes, *req.Addr)
|
||||
}
|
||||
|
||||
if err := r.store.SetPeers(req.RaftNodes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &internal.PromoteRaftResponse{
|
||||
Header: &internal.ResponseHeader{
|
||||
OK: proto.Bool(true),
|
||||
},
|
||||
Success: proto.Bool(true),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// pack returns a TLV style byte slice encoding the size of the payload, the RPC type
|
||||
// and the RPC data
|
||||
func pack(typ internal.RPCType, b []byte) []byte {
|
||||
buf := u64tob(uint64(len(b)) + 8)
|
||||
buf = append(buf, u64tob(uint64(typ))...)
|
||||
buf = append(buf, b...)
|
||||
return buf
|
||||
}
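For orientation, a hedged sketch of the inverse of pack: unpack is a hypothetical name, not part of the original code, but it mirrors the frame layout, an 8-byte big-endian size (payload length plus 8), an 8-byte RPC type, then the protobuf payload.

package meta

import (
    "encoding/binary"
    "fmt"
)

// unpack is a hypothetical inverse of pack: it splits a framed message into
// its RPC type and payload. The first 8 bytes hold len(payload)+8, the next
// 8 bytes hold the RPC type, and the rest is the protobuf payload.
func unpack(frame []byte) (typ uint64, payload []byte, err error) {
    if len(frame) < 16 {
        return 0, nil, fmt.Errorf("short frame: %d bytes", len(frame))
    }

    sz := binary.BigEndian.Uint64(frame[0:8])
    if uint64(len(frame[8:])) != sz {
        return 0, nil, fmt.Errorf("size mismatch: header says %d, have %d", sz, len(frame[8:]))
    }

    typ = binary.BigEndian.Uint64(frame[8:16])
    return typ, frame[16:], nil
}

readMessage applies the same split on the receiving side, reading the size prefix off the connection first and then peeling the RPC type off the front of the buffer.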
|
||||
|
||||
// fetchMetaData returns the latest copy of the meta store data from the current
|
||||
// leader.
|
||||
func (r *rpc) fetchMetaData(blocking bool) (*Data, error) {
|
||||
assert(r.store != nil, "store is nil")
|
||||
|
||||
// Retrieve the current known leader.
|
||||
leader := r.store.Leader()
|
||||
if leader == "" {
|
||||
return nil, errors.New("no leader detected during fetchMetaData")
|
||||
}
|
||||
|
||||
var index, term uint64
|
||||
data := r.store.cachedData()
|
||||
if data != nil {
|
||||
index = data.Index
|
||||
term = data.Index
|
||||
}
|
||||
resp, err := r.call(leader, &internal.FetchDataRequest{
|
||||
Index: proto.Uint64(index),
|
||||
Term: proto.Uint64(term),
|
||||
Blocking: proto.Bool(blocking),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch t := resp.(type) {
|
||||
case *internal.FetchDataResponse:
|
||||
// If data is nil, then the term and index we sent match the leader
|
||||
if t.GetData() == nil {
|
||||
return nil, nil
|
||||
}
|
||||
ms := &Data{}
|
||||
if err := ms.UnmarshalBinary(t.GetData()); err != nil {
|
||||
return nil, fmt.Errorf("rpc unmarshal metadata: %v", err)
|
||||
}
|
||||
return ms, nil
|
||||
case *internal.ErrorResponse:
|
||||
return nil, fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
|
||||
default:
|
||||
return nil, fmt.Errorf("rpc failed: unknown response type: %v", t.String())
|
||||
}
|
||||
}
|
||||
|
||||
// join attempts to join a cluster at remoteAddr using localAddr as the current
|
||||
// node's cluster address
|
||||
func (r *rpc) join(localAddr, remoteAddr string) (*JoinResult, error) {
|
||||
req := &internal.JoinRequest{
|
||||
Addr: proto.String(localAddr),
|
||||
}
|
||||
|
||||
resp, err := r.call(remoteAddr, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch t := resp.(type) {
|
||||
case *internal.JoinResponse:
|
||||
return &JoinResult{
|
||||
RaftEnabled: t.GetEnableRaft(),
|
||||
RaftNodes: t.GetRaftNodes(),
|
||||
NodeID: t.GetNodeID(),
|
||||
}, nil
|
||||
case *internal.ErrorResponse:
|
||||
return nil, fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
|
||||
default:
|
||||
return nil, fmt.Errorf("rpc failed: unknown response type: %v", t.String())
|
||||
}
|
||||
}
|
||||
|
||||
// enableRaft attempts to promote a node at remoteAddr using localAddr as the current
|
||||
// node's cluster address
|
||||
func (r *rpc) enableRaft(addr string, peers []string) error {
|
||||
req := &internal.PromoteRaftRequest{
|
||||
Addr: proto.String(addr),
|
||||
RaftNodes: peers,
|
||||
}
|
||||
|
||||
resp, err := r.call(addr, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch t := resp.(type) {
|
||||
case *internal.PromoteRaftResponse:
|
||||
return nil
|
||||
case *internal.ErrorResponse:
|
||||
return fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
|
||||
default:
|
||||
return fmt.Errorf("rpc failed: unknown response type: %v", t.String())
|
||||
}
|
||||
}
|
||||
|
||||
// call sends an encoded request to the remote leader and returns
|
||||
// an encoded response value.
|
||||
func (r *rpc) call(dest string, req proto.Message) (proto.Message, error) {
|
||||
// Determine type of request
|
||||
var rpcType internal.RPCType
|
||||
switch t := req.(type) {
|
||||
case *internal.JoinRequest:
|
||||
rpcType = internal.RPCType_Join
|
||||
case *internal.FetchDataRequest:
|
||||
rpcType = internal.RPCType_FetchData
|
||||
case *internal.PromoteRaftRequest:
|
||||
rpcType = internal.RPCType_PromoteRaft
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown rpc request type: %v", t)
|
||||
}
|
||||
|
||||
// Create a connection to the leader.
|
||||
conn, err := net.DialTimeout("tcp", dest, leaderDialTimeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rpc dial: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Write a marker byte for rpc messages.
|
||||
_, err = conn.Write([]byte{MuxRPCHeader})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rpc marshal: %v", err)
|
||||
}
|
||||
|
||||
// Write request size & bytes.
|
||||
if _, err := conn.Write(pack(rpcType, b)); err != nil {
|
||||
return nil, fmt.Errorf("write %v rpc: %s", rpcType, err)
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(conn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %v rpc: %v", rpcType, err)
|
||||
}
|
||||
|
||||
// Should always have a size and type
|
||||
if exp := 16; len(data) < exp {
|
||||
r.traceCluster("recv: %v", string(data))
|
||||
return nil, fmt.Errorf("rpc %v failed: short read: got %v, exp %v", rpcType, len(data), exp)
|
||||
}
|
||||
|
||||
sz := btou64(data[0:8])
|
||||
if len(data[8:]) != int(sz) {
|
||||
r.traceCluster("recv: %v", string(data))
|
||||
return nil, fmt.Errorf("rpc %v failed: short read: got %v, exp %v", rpcType, len(data[8:]), sz)
|
||||
}
|
||||
|
||||
// See what response type we got back, could get a general error response
|
||||
rpcType = internal.RPCType(btou64(data[8:16]))
|
||||
data = data[16:]
|
||||
|
||||
var resp proto.Message
|
||||
switch rpcType {
|
||||
case internal.RPCType_Join:
|
||||
resp = &internal.JoinResponse{}
|
||||
case internal.RPCType_FetchData:
|
||||
resp = &internal.FetchDataResponse{}
|
||||
case internal.RPCType_Error:
|
||||
resp = &internal.ErrorResponse{}
|
||||
case internal.RPCType_PromoteRaft:
|
||||
resp = &internal.PromoteRaftResponse{}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown rpc response type: %v", rpcType)
|
||||
}
|
||||
|
||||
if err := proto.Unmarshal(data, resp); err != nil {
|
||||
return nil, fmt.Errorf("rpc unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if reply, ok := resp.(Reply); ok {
|
||||
if !reply.GetHeader().GetOK() {
|
||||
return nil, fmt.Errorf("rpc %v failed: %s", rpcType, reply.GetHeader().GetError())
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (r *rpc) traceCluster(msg string, args ...interface{}) {
|
||||
if r.tracingEnabled {
|
||||
r.logger.Printf("rpc: "+msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func u64tob(v uint64) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, v)
|
||||
return b
|
||||
}
|
||||
|
||||
func btou64(b []byte) uint64 {
|
||||
return binary.BigEndian.Uint64(b)
|
||||
}
|
||||
|
||||
func contains(s []string, e string) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
meta/rpc_test.go (248 lines)
|
@ -1,248 +0,0 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRPCFetchData(t *testing.T) {
|
||||
|
||||
serverRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
md: &Data{Index: 99},
|
||||
},
|
||||
}
|
||||
|
||||
srv := newTestServer(t, serverRPC)
|
||||
defer srv.Close()
|
||||
go srv.Serve()
|
||||
|
||||
// Wait for the RPC server to be ready
|
||||
<-srv.Ready
|
||||
|
||||
// create a new RPC with no existing meta.Data cache
|
||||
clientRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
leader: srv.Listener.Addr().String(),
|
||||
},
|
||||
}
|
||||
|
||||
// fetch the servers meta-data
|
||||
md, err := clientRPC.fetchMetaData(false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to fetchMetaData: %v", err)
|
||||
}
|
||||
|
||||
if md == nil {
|
||||
t.Fatalf("meta-data is nil")
|
||||
}
|
||||
|
||||
if exp := uint64(99); md.Index != exp {
|
||||
t.Fatalf("meta-data mismatch. got %v, exp %v", md.Index, exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCFetchDataMatchesLeader(t *testing.T) {
|
||||
serverRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
md: &Data{Index: 99},
|
||||
},
|
||||
}
|
||||
|
||||
srv := newTestServer(t, serverRPC)
|
||||
defer srv.Close()
|
||||
go srv.Serve()
|
||||
|
||||
// Wait for the RPC server to be ready
|
||||
<-srv.Ready
|
||||
|
||||
// create a new RPC with a matching index as the server
|
||||
clientRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
leader: srv.Listener.Addr().String(),
|
||||
md: &Data{Index: 99},
|
||||
},
|
||||
}
|
||||
|
||||
// fetch the servers meta-data
|
||||
md, err := clientRPC.fetchMetaData(false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to fetchMetaData: %v", err)
|
||||
}
|
||||
|
||||
if md != nil {
|
||||
t.Fatalf("meta-data is not nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCFetchDataMatchesBlocking(t *testing.T) {
|
||||
fs := &fakeStore{
|
||||
md: &Data{Index: 99},
|
||||
blockChan: make(chan struct{}),
|
||||
}
|
||||
serverRPC := &rpc{
|
||||
store: fs,
|
||||
}
|
||||
|
||||
srv := newTestServer(t, serverRPC)
|
||||
defer srv.Close()
|
||||
go srv.Serve()
|
||||
|
||||
// Wait for the RPC server to be ready
|
||||
<-srv.Ready
|
||||
|
||||
// create a new RPC with a matching index as the server
|
||||
clientRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
leader: srv.Listener.Addr().String(),
|
||||
md: &Data{Index: 99},
|
||||
},
|
||||
}
|
||||
|
||||
// Kick off the blocking fetch
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// fetch the servers meta-data
|
||||
md, err := clientRPC.fetchMetaData(true)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to fetchMetaData: %v", err)
|
||||
}
|
||||
|
||||
if md == nil {
|
||||
t.Fatalf("meta-data is nil")
|
||||
}
|
||||
|
||||
if exp := uint64(100); md.Index != exp {
|
||||
t.Fatalf("meta-data mismatch. got %v, exp %v", md.Index, exp)
|
||||
}
|
||||
}()
|
||||
|
||||
// Simulate the remote index changing and unblocking
|
||||
fs.mu.Lock()
|
||||
fs.md = &Data{Index: 100}
|
||||
fs.mu.Unlock()
|
||||
close(fs.blockChan)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestRPCJoin(t *testing.T) {
|
||||
fs := &fakeStore{
|
||||
leader: "1.2.3.4:1234",
|
||||
md: &Data{Index: 99},
|
||||
newNodeID: uint64(100),
|
||||
blockChan: make(chan struct{}),
|
||||
}
|
||||
serverRPC := &rpc{
|
||||
store: fs,
|
||||
}
|
||||
|
||||
srv := newTestServer(t, serverRPC)
|
||||
defer srv.Close()
|
||||
go srv.Serve()
|
||||
|
||||
// Wait for the RPC server to be ready
|
||||
<-srv.Ready
|
||||
|
||||
// create a new RPC with a matching index as the server
|
||||
clientRPC := &rpc{
|
||||
store: &fakeStore{
|
||||
leader: srv.Listener.Addr().String(),
|
||||
md: &Data{Index: 99},
|
||||
},
|
||||
}
|
||||
|
||||
res, err := clientRPC.join("1.2.3.4:1234", srv.Listener.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to join: %v", err)
|
||||
}
|
||||
|
||||
if exp := true; res.RaftEnabled != true {
|
||||
t.Fatalf("raft enabled mismatch: got %v, exp %v", res.RaftEnabled, exp)
|
||||
}
|
||||
|
||||
if exp := 1; len(res.RaftNodes) != exp {
|
||||
t.Fatalf("raft peer mismatch: got %v, exp %v", len(res.RaftNodes), exp)
|
||||
}
|
||||
|
||||
if exp := "1.2.3.4:1234"; res.RaftNodes[0] != exp {
|
||||
t.Fatalf("raft peer mismatch: got %v, exp %v", res.RaftNodes[0], exp)
|
||||
}
|
||||
|
||||
if exp := uint64(100); res.NodeID != exp {
|
||||
t.Fatalf("node id mismatch. got %v, exp %v", res.NodeID, exp)
|
||||
}
|
||||
}
|
||||
|
||||
type fakeStore struct {
|
||||
mu sync.RWMutex
|
||||
leader string
|
||||
newNodeID uint64
|
||||
md *Data
|
||||
blockChan chan struct{}
|
||||
}
|
||||
|
||||
type testServer struct {
|
||||
Listener net.Listener
|
||||
Ready chan struct{}
|
||||
rpc *rpc
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func newTestServer(t *testing.T, rpc *rpc) *testServer {
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
return &testServer{
|
||||
Listener: ln,
|
||||
Ready: make(chan struct{}),
|
||||
rpc: rpc,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *testServer) Close() {
|
||||
s.Listener.Close()
|
||||
}
|
||||
|
||||
func (s *testServer) Serve() {
|
||||
close(s.Ready)
|
||||
conn, err := s.Listener.Accept()
|
||||
if err != nil {
|
||||
s.t.Fatalf("failed to accept: %v", err)
|
||||
}
|
||||
|
||||
// Demux...
|
||||
b := make([]byte, 1)
|
||||
if _, err := conn.Read(b); err != nil {
|
||||
s.t.Fatalf("failed to demux: %v", err)
|
||||
}
|
||||
s.rpc.handleRPCConn(conn)
|
||||
}
|
||||
|
||||
func (f *fakeStore) cachedData() *Data {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
return f.md
|
||||
}
|
||||
|
||||
func (f *fakeStore) IsLeader() bool { return true }
|
||||
func (f *fakeStore) Leader() string { return f.leader }
|
||||
func (f *fakeStore) Peers() ([]string, error) { return []string{f.leader}, nil }
|
||||
func (f *fakeStore) AddPeer(host string) error { return nil }
|
||||
func (f *fakeStore) CreateNode(host string) (*NodeInfo, error) {
|
||||
return &NodeInfo{ID: f.newNodeID, Host: host}, nil
|
||||
}
|
||||
func (f *fakeStore) NodeByHost(host string) (*NodeInfo, error) { return nil, nil }
|
||||
func (f *fakeStore) WaitForDataChanged() error {
|
||||
<-f.blockChan
|
||||
return nil
|
||||
}
|
||||
func (f *fakeStore) enableLocalRaft() error {
|
||||
return nil
|
||||
}
|
||||
func (f *fakeStore) SetPeers(addrs []string) error {
|
||||
return nil
|
||||
}
|
meta/state.go (516 lines)
|
@ -1,516 +0,0 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/hashicorp/raft-boltdb"
|
||||
)
|
||||
|
||||
// raftState abstracts the interaction of the raft consensus layer
|
||||
// across local or remote nodes. It is a form of the state design pattern and allows
|
||||
// the meta.Store to change its behavior with the raft layer at runtime.
|
||||
type raftState interface {
|
||||
open() error
|
||||
remove() error
|
||||
initialize() error
|
||||
leader() string
|
||||
isLeader() bool
|
||||
sync(index uint64, timeout time.Duration) error
|
||||
setPeers(addrs []string) error
|
||||
addPeer(addr string) error
|
||||
removePeer(addr string) error
|
||||
peers() ([]string, error)
|
||||
invalidate() error
|
||||
close() error
|
||||
lastIndex() uint64
|
||||
apply(b []byte) error
|
||||
snapshot() error
|
||||
isLocal() bool
|
||||
}
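As a small illustration of the state pattern mentioned in the comment above: a holder keeps a raftState value and can swap implementations at runtime while callers keep using the same interface. stateHolder and changeState are hypothetical sketches, not the store's actual promotion logic.

package meta

// stateHolder is a hypothetical wrapper showing how a store might switch
// between localRaft and remoteRaft without its callers noticing.
type stateHolder struct {
    state raftState
}

// changeState closes the current consensus strategy and opens the next one.
func (h *stateHolder) changeState(next raftState) error {
    if h.state != nil {
        if err := h.state.close(); err != nil {
            return err
        }
    }
    if err := next.open(); err != nil {
        return err
    }
    h.state = next
    return nil
}

// leader delegates to whichever strategy is currently active.
func (h *stateHolder) leader() string {
    return h.state.leader()
}

Promotion from remoteRaft to localRaft, which handlePromoteRaftRequest triggers through the store's enableLocalRaft, is roughly this kind of close-then-open swap.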
|
||||
|
||||
// localRaft is a consensus strategy that uses a local raft implementation for
|
||||
// consensus operations.
|
||||
type localRaft struct {
|
||||
wg sync.WaitGroup
|
||||
closing chan struct{}
|
||||
store *Store
|
||||
raft *raft.Raft
|
||||
transport *raft.NetworkTransport
|
||||
peerStore raft.PeerStore
|
||||
raftStore *raftboltdb.BoltStore
|
||||
raftLayer *raftLayer
|
||||
}
|
||||
|
||||
func (r *localRaft) remove() error {
|
||||
if err := os.RemoveAll(filepath.Join(r.store.path, "raft.db")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.RemoveAll(filepath.Join(r.store.path, "peers.json")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.RemoveAll(filepath.Join(r.store.path, "snapshots")); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *localRaft) updateMetaData(ms *Data) {
|
||||
if ms == nil {
|
||||
return
|
||||
}
|
||||
|
||||
updated := false
|
||||
r.store.mu.RLock()
|
||||
if ms.Index > r.store.data.Index {
|
||||
updated = true
|
||||
}
|
||||
r.store.mu.RUnlock()
|
||||
|
||||
if updated {
|
||||
r.store.Logger.Printf("Updating metastore to term=%v index=%v", ms.Term, ms.Index)
|
||||
r.store.mu.Lock()
|
||||
r.store.data = ms
|
||||
// Signal any blocked goroutines that the meta store has been updated
|
||||
r.store.notifyChanged()
|
||||
r.store.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *localRaft) invalidate() error {
|
||||
if r.store.IsLeader() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ms, err := r.store.rpc.fetchMetaData(false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error fetching meta data: %s", err)
|
||||
}
|
||||
|
||||
r.updateMetaData(ms)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *localRaft) open() error {
|
||||
r.closing = make(chan struct{})
|
||||
|
||||
s := r.store
|
||||
// Setup raft configuration.
|
||||
config := raft.DefaultConfig()
|
||||
config.LogOutput = ioutil.Discard
|
||||
|
||||
if s.clusterTracingEnabled {
|
||||
config.Logger = s.Logger
|
||||
}
|
||||
config.HeartbeatTimeout = s.HeartbeatTimeout
|
||||
config.ElectionTimeout = s.ElectionTimeout
|
||||
config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
|
||||
config.CommitTimeout = s.CommitTimeout
|
||||
// Since we actually never call `removePeer` this is safe.
|
||||
// If in the future we decide to call remove peer we have to re-evaluate how to handle this
|
||||
config.ShutdownOnRemove = false
|
||||
|
||||
// If no peers are set in the config or there is one and we are it, then start as a single server.
|
||||
if len(s.peers) <= 1 {
|
||||
config.EnableSingleNode = true
|
||||
// Ensure we can always become the leader
|
||||
config.DisableBootstrapAfterElect = false
|
||||
}
|
||||
|
||||
// Build raft layer to multiplex listener.
|
||||
r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)
|
||||
|
||||
// Create a transport layer
|
||||
r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)
|
||||
|
||||
// Create peer storage.
|
||||
r.peerStore = raft.NewJSONPeers(s.path, r.transport)
|
||||
|
||||
peers, err := r.peerStore.Peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For single-node clusters, we can update the raft peers before we start the cluster if the hostname
|
||||
// has changed.
|
||||
if config.EnableSingleNode {
|
||||
if err := r.peerStore.SetPeers([]string{s.RemoteAddr.String()}); err != nil {
|
||||
return err
|
||||
}
|
||||
peers = []string{s.RemoteAddr.String()}
|
||||
}
|
||||
|
||||
// If we have multiple nodes in the cluster, make sure our address is in the raft peers or
|
||||
// we won't be able to boot into the cluster because the other peers will reject our new hostname. This
|
||||
// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
|
||||
// of the cluster before we can change them.
|
||||
if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
|
||||
s.Logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
|
||||
return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
|
||||
}
|
||||
|
||||
// Create the log store and stable store.
|
||||
store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("new bolt store: %s", err)
|
||||
}
|
||||
r.raftStore = store
|
||||
|
||||
// Create the snapshot store.
|
||||
snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("file snapshot store: %s", err)
|
||||
}
|
||||
|
||||
// Create raft log.
|
||||
ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
|
||||
if err != nil {
|
||||
return fmt.Errorf("new raft: %s", err)
|
||||
}
|
||||
r.raft = ra
|
||||
|
||||
r.wg.Add(1)
|
||||
go r.logLeaderChanges()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *localRaft) logLeaderChanges() {
|
||||
defer r.wg.Done()
|
||||
// Logs our current state (Node at 1.2.3.4:8088 [Follower])
|
||||
r.store.Logger.Printf(r.raft.String())
|
||||
for {
|
||||
select {
|
||||
case <-r.closing:
|
||||
return
|
||||
case <-r.raft.LeaderCh():
|
||||
peers, err := r.peers()
|
||||
if err != nil {
|
||||
r.store.Logger.Printf("failed to lookup peers: %v", err)
|
||||
}
|
||||
r.store.Logger.Printf("%v. peers=%v", r.raft.String(), peers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *localRaft) close() error {
|
||||
if r.closing != nil {
|
||||
close(r.closing)
|
||||
}
|
||||
r.wg.Wait()
|
||||
|
||||
if r.transport != nil {
|
||||
r.transport.Close()
|
||||
r.transport = nil
|
||||
}
|
||||
|
||||
// Shutdown raft.
|
||||
if r.raft != nil {
|
||||
if err := r.raft.Shutdown().Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
r.raft = nil
|
||||
}
|
||||
|
||||
if r.raftStore != nil {
|
||||
r.raftStore.Close()
|
||||
r.raftStore = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *localRaft) initialize() error {
|
||||
s := r.store
|
||||
// If we have committed entries then the store is already in the cluster.
|
||||
if index, err := r.raftStore.LastIndex(); err != nil {
|
||||
return fmt.Errorf("last index: %s", err)
|
||||
} else if index > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Force set peers.
|
||||
if err := r.setPeers(s.peers); err != nil {
|
||||
return fmt.Errorf("set raft peers: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// apply applies a serialized command to the raft log.
|
||||
func (r *localRaft) apply(b []byte) error {
|
||||
// Apply to raft log.
|
||||
f := r.raft.Apply(b, 0)
|
||||
if err := f.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Return response if it's an error.
|
||||
// No other non-nil objects should be returned.
|
||||
resp := f.Response()
|
||||
if err, ok := resp.(error); ok {
|
||||
return lookupError(err)
|
||||
}
|
||||
assert(resp == nil, "unexpected response: %#v", resp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *localRaft) lastIndex() uint64 {
|
||||
return r.raft.LastIndex()
|
||||
}
|
||||
|
||||
func (r *localRaft) sync(index uint64, timeout time.Duration) error {
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
timer := time.NewTimer(timeout)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
// Wait for next tick or timeout.
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-timer.C:
|
||||
return errors.New("timeout")
|
||||
}
|
||||
|
||||
// Compare index against current metadata.
|
||||
r.store.mu.Lock()
|
||||
ok := (r.store.data.Index >= index)
|
||||
r.store.mu.Unlock()
|
||||
|
||||
// Exit if we are at least at the given index.
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *localRaft) snapshot() error {
|
||||
future := r.raft.Snapshot()
|
||||
return future.Error()
|
||||
}
|
||||
|
||||
// addPeer adds addr to the list of peers in the cluster.
|
||||
func (r *localRaft) addPeer(addr string) error {
|
||||
peers, err := r.peerStore.Peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(peers) >= 3 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if fut := r.raft.AddPeer(addr); fut.Error() != nil {
|
||||
return fut.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removePeer removes addr from the list of peers in the cluster.
|
||||
func (r *localRaft) removePeer(addr string) error {
|
||||
// Only do this on the leader
|
||||
if !r.isLeader() {
|
||||
return errors.New("not the leader")
|
||||
}
|
||||
if fut := r.raft.RemovePeer(addr); fut.Error() != nil {
|
||||
return fut.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setPeers sets a list of peers in the cluster.
|
||||
func (r *localRaft) setPeers(addrs []string) error {
|
||||
return r.raft.SetPeers(addrs).Error()
|
||||
}
|
||||
|
||||
func (r *localRaft) peers() ([]string, error) {
|
||||
return r.peerStore.Peers()
|
||||
}
|
||||
|
||||
func (r *localRaft) leader() string {
|
||||
if r.raft == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return r.raft.Leader()
|
||||
}
|
||||
|
||||
func (r *localRaft) isLeader() bool {
|
||||
if r.raft == nil {
|
||||
return false
|
||||
}
|
||||
return r.raft.State() == raft.Leader
|
||||
}
|
||||
|
||||
func (r *localRaft) isLocal() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// remoteRaft is a consensus strategy that uses a remote raft cluster for
|
||||
// consensus operations.
|
||||
type remoteRaft struct {
|
||||
store *Store
|
||||
}
|
||||
|
||||
func (r *remoteRaft) remove() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *remoteRaft) updateMetaData(ms *Data) {
|
||||
if ms == nil {
|
||||
return
|
||||
}
|
||||
|
||||
updated := false
|
||||
r.store.mu.RLock()
|
||||
if ms.Index > r.store.data.Index {
|
||||
updated = true
|
||||
}
|
||||
r.store.mu.RUnlock()
|
||||
|
||||
if updated {
|
||||
r.store.Logger.Printf("Updating metastore to term=%v index=%v", ms.Term, ms.Index)
|
||||
r.store.mu.Lock()
|
||||
r.store.data = ms
|
||||
// Signal any blocked goroutines that the meta store has been updated
|
||||
r.store.notifyChanged()
|
||||
r.store.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *remoteRaft) invalidate() error {
|
||||
ms, err := r.store.rpc.fetchMetaData(false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error fetching meta data: %s", err)
|
||||
}
|
||||
|
||||
r.updateMetaData(ms)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *remoteRaft) setPeers(addrs []string) error {
|
||||
// Convert to JSON
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
if err := enc.Encode(addrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write out as JSON
|
||||
return ioutil.WriteFile(filepath.Join(r.store.path, "peers.json"), buf.Bytes(), 0755)
|
||||
}
|
||||
|
||||
// addPeer adds addr to the list of peers in the cluster.
|
||||
func (r *remoteRaft) addPeer(addr string) error {
|
||||
return fmt.Errorf("cannot add peer using remote raft")
|
||||
}
|
||||
|
||||
// removePeer does nothing for remoteRaft.
|
||||
func (r *remoteRaft) removePeer(addr string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *remoteRaft) peers() ([]string, error) {
|
||||
return readPeersJSON(filepath.Join(r.store.path, "peers.json"))
|
||||
}
|
||||
|
||||
func (r *remoteRaft) open() error {
|
||||
if err := r.setPeers(r.store.peers); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-r.store.closing:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
ms, err := r.store.rpc.fetchMetaData(true)
|
||||
if err != nil {
|
||||
r.store.Logger.Printf("fetch metastore: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
r.updateMetaData(ms)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *remoteRaft) close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// apply applies a serialized command to the raft log.
|
||||
func (r *remoteRaft) apply(b []byte) error {
|
||||
return fmt.Errorf("cannot apply log while in remote raft state")
|
||||
}
|
||||
|
||||
func (r *remoteRaft) initialize() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *remoteRaft) leader() string {
|
||||
if len(r.store.peers) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return r.store.peers[rand.Intn(len(r.store.peers))]
|
||||
}
|
||||
|
||||
func (r *remoteRaft) isLeader() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *remoteRaft) isLocal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *remoteRaft) lastIndex() uint64 {
|
||||
return r.store.cachedData().Index
|
||||
}
|
||||
|
||||
func (r *remoteRaft) sync(index uint64, timeout time.Duration) error {
|
||||
//FIXME: jwilder: check index and timeout
|
||||
return r.store.invalidate()
|
||||
}
|
||||
|
||||
func (r *remoteRaft) snapshot() error {
|
||||
return fmt.Errorf("cannot snapshot while in remote raft state")
|
||||
}
|
||||
|
||||
func readPeersJSON(path string) ([]string, error) {
|
||||
// Read the file
|
||||
buf, err := ioutil.ReadFile(path)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check for no peers
|
||||
if len(buf) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Decode the peers
|
||||
var peers []string
|
||||
dec := json.NewDecoder(bytes.NewReader(buf))
|
||||
if err := dec.Decode(&peers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return peers, nil
|
||||
}
|
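A brief, hypothetical illustration of readPeersJSON's contract as written above: a missing or empty peers.json yields no peers and no error, while a JSON array of addresses yields those peers in order. The loadPeersExample name and the printed messages are assumptions for illustration only.

package meta

import "fmt"

// loadPeersExample shows how a caller might interpret readPeersJSON's result.
// path points at the peers.json file itself, e.g. <store path>/peers.json.
func loadPeersExample(path string) {
    peers, err := readPeersJSON(path)
    if err != nil {
        fmt.Println("read peers:", err)
        return
    }
    if len(peers) == 0 {
        // Missing or empty file: no peers known yet.
        fmt.Println("no peers on disk; starting as a single node")
        return
    }
    fmt.Println("raft peers:", peers)
}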
meta/store.go (2360 lines): diff suppressed because it is too large
meta/store_test.go (1409 lines): diff suppressed because it is too large
|
@ -13,8 +13,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
const leaderWaitTimeout = 30 * time.Second
|
||||
|
@ -88,17 +88,16 @@ type Monitor struct {
|
|||
storeAddress string
|
||||
storeInterval time.Duration
|
||||
|
||||
MetaStore interface {
|
||||
ClusterID() (uint64, error)
|
||||
NodeID() uint64
|
||||
WaitForLeader(d time.Duration) error
|
||||
IsLeader() bool
|
||||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
||||
CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
|
||||
MetaClient interface {
|
||||
ClusterID() uint64
|
||||
CreateDatabase(name string) (*meta.DatabaseInfo, error)
|
||||
CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
|
||||
SetDefaultRetentionPolicy(database, name string) error
|
||||
DropRetentionPolicy(database, name string) error
|
||||
}
|
||||
|
||||
NodeID uint64
|
||||
|
||||
PointsWriter interface {
|
||||
WritePoints(p *cluster.WritePointsRequest) error
|
||||
}
|
||||
|
@ -305,11 +304,11 @@ func (m *Monitor) Diagnostics() (map[string]*Diagnostic, error) {
|
|||
|
||||
// createInternalStorage ensures the internal storage has been created.
|
||||
func (m *Monitor) createInternalStorage() {
|
||||
if !m.MetaStore.IsLeader() || m.storeCreated {
|
||||
if m.storeCreated {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := m.MetaStore.CreateDatabaseIfNotExists(m.storeDatabase); err != nil {
|
||||
if _, err := m.MetaClient.CreateDatabase(m.storeDatabase); err != nil {
|
||||
m.Logger.Printf("failed to create database '%s', failed to create storage: %s",
|
||||
m.storeDatabase, err.Error())
|
||||
return
|
||||
|
@ -318,19 +317,19 @@ func (m *Monitor) createInternalStorage() {
|
|||
rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy)
|
||||
rpi.Duration = MonitorRetentionPolicyDuration
|
||||
rpi.ReplicaN = 1
|
||||
if _, err := m.MetaStore.CreateRetentionPolicyIfNotExists(m.storeDatabase, rpi); err != nil {
|
||||
if _, err := m.MetaClient.CreateRetentionPolicy(m.storeDatabase, rpi); err != nil {
|
||||
m.Logger.Printf("failed to create retention policy '%s', failed to create internal storage: %s",
|
||||
rpi.Name, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := m.MetaStore.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil {
|
||||
if err := m.MetaClient.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil {
|
||||
m.Logger.Printf("failed to set default retention policy on '%s', failed to create internal storage: %s",
|
||||
m.storeDatabase, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err := m.MetaStore.DropRetentionPolicy(m.storeDatabase, "default")
|
||||
err := m.MetaClient.DropRetentionPolicy(m.storeDatabase, "default")
|
||||
if err != nil && err.Error() != influxdb.ErrRetentionPolicyNotFound("default").Error() {
|
||||
m.Logger.Printf("failed to delete retention policy 'default', failed to created internal storage: %s", err.Error())
|
||||
return
|
||||
|
@ -346,18 +345,12 @@ func (m *Monitor) storeStatistics() {
|
|||
m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s",
|
||||
m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)
|
||||
|
||||
if err := m.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
||||
m.Logger.Printf("failed to detect a cluster leader, terminating storage: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Get cluster-level metadata. Nothing different is going to happen if errors occur.
|
||||
clusterID, _ := m.MetaStore.ClusterID()
|
||||
nodeID := m.MetaStore.NodeID()
|
||||
clusterID := m.MetaClient.ClusterID()
|
||||
hostname, _ := os.Hostname()
|
||||
clusterTags := map[string]string{
|
||||
"clusterID": fmt.Sprintf("%d", clusterID),
|
||||
"nodeID": fmt.Sprintf("%d", nodeID),
|
||||
"nodeID": fmt.Sprintf("%d", m.NodeID),
|
||||
"hostname": hostname,
|
||||
}
|
||||
|
||||
|
|
|
@ -3,11 +3,10 @@ package monitor
|
|||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
// Test that a registered stats client results in the correct SHOW STATS output.
|
||||
|
@ -37,24 +36,22 @@ func Test_RegisterStats(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type mockMetastore struct{}
|
||||
type mockMetaClient struct{}
|
||||
|
||||
func (m *mockMetastore) ClusterID() (uint64, error) { return 1, nil }
|
||||
func (m *mockMetastore) NodeID() uint64 { return 2 }
|
||||
func (m *mockMetastore) WaitForLeader(d time.Duration) error { return nil }
|
||||
func (m *mockMetastore) IsLeader() bool { return true }
|
||||
func (m *mockMetastore) SetDefaultRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetastore) DropRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetastore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
||||
func (m *mockMetaClient) ClusterID() uint64 { return 1 }
|
||||
func (m *mockMetaClient) IsLeader() bool { return true }
|
||||
func (m *mockMetaClient) SetDefaultRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetaClient) DropRetentionPolicy(database, name string) error { return nil }
|
||||
func (m *mockMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockMetastore) CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) {
|
||||
func (m *mockMetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func openMonitor(t *testing.T) *Monitor {
|
||||
monitor := New(NewConfig())
|
||||
monitor.MetaStore = &mockMetastore{}
|
||||
monitor.MetaClient = &mockMetaClient{}
|
||||
err := monitor.Open()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open monitor: %s", err.Error())
|
||||
|
|
|
@ -0,0 +1,55 @@
package influxdb

import (
    "encoding/json"
    "os"
    "path/filepath"
)

const nodeFile = "node.json"

type Node struct {
    path        string
    ID          uint64
    MetaServers []string
}

// NewNode will load the node information from disk if present
func NewNode(path string) (*Node, error) {
    n := &Node{
        path: path,
    }

    f, err := os.Open(filepath.Join(path, nodeFile))
    if err != nil {
        if !os.IsNotExist(err) {
            return nil, err
        }
        return n, nil
    }
    defer f.Close()

    if err := json.NewDecoder(f).Decode(n); err != nil {
        return nil, err
    }

    return n, nil
}

// Save will save the node file to disk and replace the existing one if present
func (n *Node) Save() error {
    file := filepath.Join(n.path, nodeFile)
    tmpFile := file + "tmp"

    f, err := os.Create(tmpFile)
    if err != nil {
        return err
    }
    defer f.Close()

    if err := json.NewEncoder(f).Encode(n); err != nil {
        return err
    }

    return os.Rename(tmpFile, file)
}
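A short usage sketch for the new Node type; the data directory and the meta server address below are placeholders for illustration, not values taken from this change.

package main

import (
    "log"

    "github.com/influxdb/influxdb"
)

func main() {
    // Load node.json from the data directory if it exists; otherwise start
    // with an empty Node bound to that directory.
    node, err := influxdb.NewNode("/var/lib/influxdb")
    if err != nil {
        log.Fatal(err)
    }

    // Record an assigned ID and the meta servers, then persist the file.
    // Save writes node.jsontmp first and renames it over node.json, so a
    // crash mid-write leaves the previous file intact.
    node.ID = 1
    node.MetaServers = []string{"localhost:8091"}
    if err := node.Save(); err != nil {
        log.Fatal(err)
    }
}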
@ -12,8 +12,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
"github.com/kimor79/gollectd"
|
||||
)
|
||||
|
@ -38,16 +38,15 @@ type pointsWriter interface {
|
|||
}
|
||||
|
||||
// metaStore is an internal interface to make testing easier.
|
||||
type metaStore interface {
|
||||
WaitForLeader(d time.Duration) error
|
||||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
||||
type metaClient interface {
|
||||
CreateDatabase(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
|
||||
// Service represents a UDP server which receives metrics in collectd's binary
|
||||
// protocol and stores them in InfluxDB.
|
||||
type Service struct {
|
||||
Config *Config
|
||||
MetaStore metaStore
|
||||
MetaClient metaClient
|
||||
PointsWriter pointsWriter
|
||||
Logger *log.Logger
|
||||
|
||||
|
@ -92,12 +91,7 @@ func (s *Service) Open() error {
|
|||
return fmt.Errorf("PointsWriter is nil")
|
||||
}
|
||||
|
||||
if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
||||
s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.Config.Database); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase(s.Config.Database); err != nil {
|
||||
s.Logger.Printf("Failed to ensure target database %s exists: %s", s.Config.Database, err.Error())
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -10,8 +10,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
)
|
||||
|
||||
|
@ -23,7 +23,7 @@ func TestService_CreatesDatabase(t *testing.T) {
|
|||
|
||||
createDatabaseCalled := false
|
||||
|
||||
ms := &testMetaStore{}
|
||||
ms := &testMetaClient{}
|
||||
ms.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) {
|
||||
if name != s.Config.Database {
|
||||
t.Errorf("\n\texp = %s\n\tgot = %s\n", s.Config.Database, name)
|
||||
|
@ -31,7 +31,7 @@ func TestService_CreatesDatabase(t *testing.T) {
|
|||
createDatabaseCalled = true
|
||||
return nil, nil
|
||||
}
|
||||
s.Service.MetaStore = ms
|
||||
s.Service.MetaClient = ms
|
||||
|
||||
s.Open()
|
||||
s.Close()
|
||||
|
@ -55,7 +55,7 @@ func TestService_BatchSize(t *testing.T) {
|
|||
s := newTestService(batchSize, time.Second)
|
||||
|
||||
pointCh := make(chan models.Point)
|
||||
s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
||||
s.MetaClient.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
||||
s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
|
||||
if len(req.Points) != batchSize {
|
||||
t.Errorf("\n\texp = %d\n\tgot = %d\n", batchSize, len(req.Points))
|
||||
|
@ -124,7 +124,7 @@ func TestService_BatchDuration(t *testing.T) {
|
|||
s := newTestService(5000, 250*time.Millisecond)
|
||||
|
||||
pointCh := make(chan models.Point, 1000)
|
||||
s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
||||
s.MetaClient.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
||||
s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
|
||||
for _, p := range req.Points {
|
||||
pointCh <- p
|
||||
|
@ -180,7 +180,7 @@ Loop:
|
|||
|
||||
type testService struct {
|
||||
*Service
|
||||
MetaStore testMetaStore
|
||||
MetaClient testMetaClient
|
||||
PointsWriter testPointsWriter
|
||||
}
|
||||
|
||||
|
@ -194,7 +194,7 @@ func newTestService(batchSize int, batchDuration time.Duration) *testService {
|
|||
}),
|
||||
}
|
||||
s.Service.PointsWriter = &s.PointsWriter
|
||||
s.Service.MetaStore = &s.MetaStore
|
||||
s.Service.MetaClient = &s.MetaClient
|
||||
|
||||
// Set the collectd types using test string.
|
||||
if err := s.SetTypes(typesDBText); err != nil {
|
||||
|
@ -216,19 +216,15 @@ func (w *testPointsWriter) WritePoints(p *cluster.WritePointsRequest) error {
|
|||
return w.WritePointsFn(p)
|
||||
}
|
||||
|
||||
type testMetaStore struct {
|
||||
type testMetaClient struct {
|
||||
CreateDatabaseIfNotExistsFn func(name string) (*meta.DatabaseInfo, error)
|
||||
//DatabaseFn func(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
|
||||
func (ms *testMetaStore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
||||
func (ms *testMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) {
|
||||
return ms.CreateDatabaseIfNotExistsFn(name)
|
||||
}
|
||||
|
||||
func (ms *testMetaStore) WaitForLeader(d time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func wait(c chan struct{}, d time.Duration) (err error) {
|
||||
select {
|
||||
case <-c:
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
|
@ -41,11 +41,12 @@ type queryExecutor interface {
|
|||
ExecuteQuery(query *influxql.Query, database string, chunkSize int, closing chan struct{}) (<-chan *influxql.Result, error)
|
||||
}
|
||||
|
||||
// metaStore is an internal interface to make testing easier.
|
||||
type metaStore interface {
|
||||
IsLeader() bool
|
||||
// metaClient is an internal interface to make testing easier.
|
||||
type metaClient interface {
|
||||
AcquireLease(name string) (l *meta.Lease, err error)
|
||||
Databases() ([]meta.DatabaseInfo, error)
|
||||
Database(name string) (*meta.DatabaseInfo, error)
|
||||
NodeID() uint64
|
||||
}
|
||||
|
||||
// RunRequest is a request to run one or more CQs.
|
||||
|
@ -72,7 +73,7 @@ func (rr *RunRequest) matches(cq *meta.ContinuousQueryInfo) bool {
|
|||
|
||||
// Service manages continuous query execution.
|
||||
type Service struct {
|
||||
MetaStore metaStore
|
||||
MetaClient metaClient
|
||||
QueryExecutor queryExecutor
|
||||
Config *Config
|
||||
RunInterval time.Duration
|
||||
|
@ -111,7 +112,7 @@ func (s *Service) Open() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
assert(s.MetaStore != nil, "MetaStore is nil")
|
||||
assert(s.MetaClient != nil, "MetaClient is nil")
|
||||
assert(s.QueryExecutor != nil, "QueryExecutor is nil")
|
||||
|
||||
s.stop = make(chan struct{})
|
||||
|
@ -144,7 +145,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
|
|||
|
||||
if database != "" {
|
||||
// Find the requested database.
|
||||
db, err := s.MetaStore.Database(database)
|
||||
db, err := s.MetaClient.Database(database)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if db == nil {
|
||||
|
@ -154,7 +155,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
|
|||
} else {
|
||||
// Get all databases.
|
||||
var err error
|
||||
dbs, err = s.MetaStore.Databases()
|
||||
dbs, err = s.MetaClient.Databases()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -183,6 +184,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
|
|||
|
||||
// backgroundLoop runs on a go routine and periodically executes CQs.
|
||||
func (s *Service) backgroundLoop() {
|
||||
leaseName := "continuous_querier"
|
||||
defer s.wg.Done()
|
||||
for {
|
||||
select {
|
||||
|
@ -190,22 +192,45 @@ func (s *Service) backgroundLoop() {
|
|||
s.Logger.Println("continuous query service terminating")
|
||||
return
|
||||
case req := <-s.RunCh:
|
||||
if s.MetaStore.IsLeader() {
|
||||
if !s.hasContinuousQueries() {
|
||||
continue
|
||||
}
|
||||
if _, err := s.MetaClient.AcquireLease(leaseName); err == nil {
|
||||
s.Logger.Printf("running continuous queries by request for time: %v", req.Now)
|
||||
s.runContinuousQueries(req)
|
||||
}
|
||||
case <-time.After(s.RunInterval):
|
||||
if s.MetaStore.IsLeader() {
|
||||
if !s.hasContinuousQueries() {
|
||||
continue
|
||||
}
|
||||
if _, err := s.MetaClient.AcquireLease(leaseName); err == nil {
|
||||
s.runContinuousQueries(&RunRequest{Now: time.Now()})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// hasContinuousQueries returns true if any CQs exist.
|
||||
func (s *Service) hasContinuousQueries() bool {
|
||||
// Get list of all databases.
|
||||
dbs, err := s.MetaClient.Databases()
|
||||
if err != nil {
|
||||
s.Logger.Println("error getting databases")
|
||||
return false
|
||||
}
|
||||
// Loop through all databases executing CQs.
|
||||
for _, db := range dbs {
|
||||
if len(db.ContinuousQueries) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// runContinuousQueries gets CQs from the meta store and runs them.
|
||||
func (s *Service) runContinuousQueries(req *RunRequest) {
|
||||
// Get list of all databases.
|
||||
dbs, err := s.MetaStore.Databases()
|
||||
dbs, err := s.MetaClient.Databases()
|
||||
if err != nil {
|
||||
s.Logger.Println("error getting databases")
|
||||
return
|
||||
|
|
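Review note: the CQ service no longer asks `MetaStore.IsLeader()`; every node races for `AcquireLease("continuous_querier")` and only the holder runs CQs, with `hasContinuousQueries()` short-circuiting the lease call when nothing is configured. A standalone sketch of that coordination pattern with the lease client faked locally (the names here are illustrative, not the service's API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// leaser stands in for the meta client's AcquireLease; only one caller at a
// time is granted the named lease.
type leaser interface {
	AcquireLease(name string) error
}

type localLeaser struct{ owned bool }

func (l *localLeaser) AcquireLease(name string) error {
	if l.owned {
		return errors.New("another node owns the lease")
	}
	l.owned = true
	return nil
}

// runIfLeaseHolder mirrors the backgroundLoop shape: skip the lease entirely
// when there is no work, otherwise run only if the lease was granted.
func runIfLeaseHolder(lc leaser, hasWork func() bool, run func()) {
	if !hasWork() {
		return
	}
	if err := lc.AcquireLease("continuous_querier"); err != nil {
		return // someone else holds the lease; stay idle
	}
	run()
}

func main() {
	lc := &localLeaser{}
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		<-ticker.C
		runIfLeaseHolder(lc, func() bool { return true }, func() {
			fmt.Println("running continuous queries")
		})
	}
}
```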
|
@ -11,8 +11,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -95,12 +95,12 @@ func TestContinuousQueryService_Run(t *testing.T) {
|
|||
|
||||
func TestContinuousQueryService_ResampleOptions(t *testing.T) {
|
||||
s := NewTestService(t)
|
||||
ms := NewMetaStore(t)
|
||||
ms.CreateDatabase("db", "")
|
||||
ms.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 2m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m) END`)
|
||||
s.MetaStore = ms
|
||||
mc := NewMetaClient(t)
|
||||
mc.CreateDatabase("db", "")
|
||||
mc.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 2m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m) END`)
|
||||
s.MetaClient = mc
|
||||
|
||||
db, err := s.MetaStore.Database("db")
|
||||
db, err := s.MetaClient.Database("db")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -237,7 +237,7 @@ func TestContinuousQueryService_NotLeader(t *testing.T) {
|
|||
s := NewTestService(t)
|
||||
// Set RunInterval high so we can test triggering with the RunCh below.
|
||||
s.RunInterval = 10 * time.Second
|
||||
s.MetaStore.(*MetaStore).Leader = false
|
||||
s.MetaClient.(*MetaClient).Leader = false
|
||||
|
||||
done := make(chan struct{})
|
||||
qe := s.QueryExecutor.(*QueryExecutor)
|
||||
|
@ -258,11 +258,11 @@ func TestContinuousQueryService_NotLeader(t *testing.T) {
|
|||
}
|
||||
|
||||
// Test service behavior when meta store fails to get databases.
|
||||
func TestContinuousQueryService_MetaStoreFailsToGetDatabases(t *testing.T) {
|
||||
func TestContinuousQueryService_MetaClientFailsToGetDatabases(t *testing.T) {
|
||||
s := NewTestService(t)
|
||||
// Set RunInterval high so we can test triggering with the RunCh below.
|
||||
s.RunInterval = 10 * time.Second
|
||||
s.MetaStore.(*MetaStore).Err = errExpected
|
||||
s.MetaClient.(*MetaClient).Err = errExpected
|
||||
|
||||
done := make(chan struct{})
|
||||
qe := s.QueryExecutor.(*QueryExecutor)
|
||||
|
@ -285,7 +285,7 @@ func TestContinuousQueryService_MetaStoreFailsToGetDatabases(t *testing.T) {
|
|||
// Test ExecuteContinuousQuery with invalid queries.
|
||||
func TestExecuteContinuousQuery_InvalidQueries(t *testing.T) {
|
||||
s := NewTestService(t)
|
||||
dbis, _ := s.MetaStore.Databases()
|
||||
dbis, _ := s.MetaClient.Databases()
|
||||
dbi := dbis[0]
|
||||
cqi := dbi.ContinuousQueries[0]
|
||||
|
||||
|
@ -316,7 +316,7 @@ func TestExecuteContinuousQuery_QueryExecutor_Error(t *testing.T) {
|
|||
qe := s.QueryExecutor.(*QueryExecutor)
|
||||
qe.Err = errExpected
|
||||
|
||||
dbis, _ := s.MetaStore.Databases()
|
||||
dbis, _ := s.MetaClient.Databases()
|
||||
dbi := dbis[0]
|
||||
cqi := dbi.ContinuousQueries[0]
|
||||
|
||||
|
@ -330,8 +330,8 @@ func TestExecuteContinuousQuery_QueryExecutor_Error(t *testing.T) {
|
|||
// NewTestService returns a new *Service with default mock object members.
|
||||
func NewTestService(t *testing.T) *Service {
|
||||
s := NewService(NewConfig())
|
||||
ms := NewMetaStore(t)
|
||||
s.MetaStore = ms
|
||||
ms := NewMetaClient(t)
|
||||
s.MetaClient = ms
|
||||
s.QueryExecutor = NewQueryExecutor(t)
|
||||
s.RunInterval = time.Millisecond
|
||||
|
||||
|
@ -351,45 +351,56 @@ func NewTestService(t *testing.T) *Service {
|
|||
return s
|
||||
}
|
||||
|
||||
// MetaStore is a mock meta store.
|
||||
type MetaStore struct {
|
||||
// MetaClient is a mock meta client.
|
||||
type MetaClient struct {
|
||||
mu sync.RWMutex
|
||||
Leader bool
|
||||
AllowLease bool
|
||||
DatabaseInfos []meta.DatabaseInfo
|
||||
Err error
|
||||
t *testing.T
|
||||
nodeID uint64
|
||||
}
|
||||
|
||||
// NewMetaStore returns a *MetaStore.
|
||||
func NewMetaStore(t *testing.T) *MetaStore {
|
||||
return &MetaStore{
|
||||
Leader: true,
|
||||
t: t,
|
||||
// NewMetaClient returns a *MetaClient.
|
||||
func NewMetaClient(t *testing.T) *MetaClient {
|
||||
return &MetaClient{
|
||||
Leader: true,
|
||||
AllowLease: true,
|
||||
t: t,
|
||||
nodeID: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// IsLeader returns true if the node is the cluster leader.
|
||||
func (ms *MetaStore) IsLeader() bool {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
return ms.Leader
|
||||
// NodeID returns the client's node ID.
|
||||
func (ms *MetaClient) NodeID() uint64 { return ms.nodeID }
|
||||
|
||||
// AcquireLease attempts to acquire the specified lease.
|
||||
func (ms *MetaClient) AcquireLease(name string) (l *meta.Lease, err error) {
|
||||
if ms.Leader {
|
||||
if ms.AllowLease {
|
||||
return &meta.Lease{Name: name}, nil
|
||||
}
|
||||
return nil, errors.New("another node owns the lease")
|
||||
}
|
||||
return nil, meta.ErrServiceUnavailable
|
||||
}
|
||||
|
||||
// Databases returns a list of database info about each database in the cluster.
|
||||
func (ms *MetaStore) Databases() ([]meta.DatabaseInfo, error) {
|
||||
func (ms *MetaClient) Databases() ([]meta.DatabaseInfo, error) {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
return ms.DatabaseInfos, ms.Err
|
||||
}
|
||||
|
||||
// Database returns a single database by name.
|
||||
func (ms *MetaStore) Database(name string) (*meta.DatabaseInfo, error) {
|
||||
func (ms *MetaClient) Database(name string) (*meta.DatabaseInfo, error) {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
return ms.database(name)
|
||||
}
|
||||
|
||||
func (ms *MetaStore) database(name string) (*meta.DatabaseInfo, error) {
|
||||
func (ms *MetaClient) database(name string) (*meta.DatabaseInfo, error) {
|
||||
if ms.Err != nil {
|
||||
return nil, ms.Err
|
||||
}
|
||||
|
@ -402,7 +413,7 @@ func (ms *MetaStore) database(name string) (*meta.DatabaseInfo, error) {
|
|||
}
|
||||
|
||||
// CreateDatabase adds a new database to the meta store.
|
||||
func (ms *MetaStore) CreateDatabase(name, defaultRetentionPolicy string) error {
|
||||
func (ms *MetaClient) CreateDatabase(name, defaultRetentionPolicy string) error {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.Err != nil {
|
||||
|
@ -426,7 +437,7 @@ func (ms *MetaStore) CreateDatabase(name, defaultRetentionPolicy string) error {
|
|||
}
|
||||
|
||||
// CreateContinuousQuery adds a CQ to the meta store.
|
||||
func (ms *MetaStore) CreateContinuousQuery(database, name, query string) error {
|
||||
func (ms *MetaClient) CreateContinuousQuery(database, name, query string) error {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.Err != nil {
|
||||
|
|
|
@ -14,8 +14,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/monitor"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
|
@ -82,9 +82,8 @@ type Service struct {
|
|||
PointsWriter interface {
|
||||
WritePoints(p *cluster.WritePointsRequest) error
|
||||
}
|
||||
MetaStore interface {
|
||||
WaitForLeader(d time.Duration) error
|
||||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
||||
MetaClient interface {
|
||||
CreateDatabase(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -143,12 +142,7 @@ func (s *Service) Open() error {
|
|||
s.Monitor.RegisterDiagnosticsClient(key, s)
|
||||
}
|
||||
|
||||
if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
||||
s.logger.Printf("Failed to detect a cluster leader: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.database); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase(s.database); err != nil {
|
||||
s.logger.Printf("Failed to ensure target database %s exists: %s", s.database, err.Error())
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -8,9 +8,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/graphite"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
)
|
||||
|
||||
|
@ -58,7 +58,7 @@ func Test_ServerGraphiteTCP(t *testing.T) {
|
|||
}
|
||||
service.PointsWriter = &pointsWriter
|
||||
dbCreator := DatabaseCreator{}
|
||||
service.MetaStore = &dbCreator
|
||||
service.MetaClient = &dbCreator
|
||||
|
||||
if err := service.Open(); err != nil {
|
||||
t.Fatalf("failed to open Graphite service: %s", err.Error())
|
||||
|
@ -131,7 +131,7 @@ func Test_ServerGraphiteUDP(t *testing.T) {
|
|||
}
|
||||
service.PointsWriter = &pointsWriter
|
||||
dbCreator := DatabaseCreator{}
|
||||
service.MetaStore = &dbCreator
|
||||
service.MetaClient = &dbCreator
|
||||
|
||||
if err := service.Open(); err != nil {
|
||||
t.Fatalf("failed to open Graphite service: %s", err.Error())
|
||||
|
@ -172,15 +172,11 @@ type DatabaseCreator struct {
|
|||
Created bool
|
||||
}
|
||||
|
||||
func (d *DatabaseCreator) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
||||
func (d *DatabaseCreator) CreateDatabase(name string) (*meta.DatabaseInfo, error) {
|
||||
d.Created = true
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *DatabaseCreator) WaitForLeader(t time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test Helpers
|
||||
func errstr(err error) string {
|
||||
if err != nil {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package hh
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
|
@ -58,3 +59,10 @@ func NewConfig() Config {
|
|||
PurgeInterval: toml.Duration(DefaultPurgeInterval),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
if c.Enabled && c.Dir == "" {
|
||||
return errors.New("HintedHandoff.Dir must be specified")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
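Review note: hinted handoff gains the same `Validate()` guard other services have, rejecting an enabled config with no `Dir`. A small sketch of surfacing that error at load time, assuming the `services/hh` import path from this diff and the `NewConfig`/`Validate` names shown above; the empty `Dir` is deliberate to show the rejection:

```go
package main

import (
	"log"

	"github.com/influxdb/influxdb/services/hh"
)

func main() {
	c := hh.NewConfig()
	c.Enabled = true
	c.Dir = "" // deliberately invalid: enabled but no directory

	// Validate is the new guard added in this change; an enabled config with
	// an empty Dir is rejected before the service ever opens.
	if err := c.Validate(); err != nil {
		log.Fatalf("invalid hinted-handoff config: %v", err)
	}
}
```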
|
@ -32,7 +32,7 @@ type NodeProcessor struct {
|
|||
done chan struct{}
|
||||
|
||||
queue *queue
|
||||
meta metaStore
|
||||
meta metaClient
|
||||
writer shardWriter
|
||||
|
||||
statMap *expvar.Map
|
||||
|
@ -41,7 +41,7 @@ type NodeProcessor struct {
|
|||
|
||||
// NewNodeProcessor returns a new NodeProcessor for the given node, using dir for
|
||||
// the hinted-handoff data.
|
||||
func NewNodeProcessor(nodeID uint64, dir string, w shardWriter, m metaStore) *NodeProcessor {
|
||||
func NewNodeProcessor(nodeID uint64, dir string, w shardWriter, m metaClient) *NodeProcessor {
|
||||
key := strings.Join([]string{"hh_processor", dir}, ":")
|
||||
tags := map[string]string{"node": fmt.Sprintf("%d", nodeID), "path": dir}
|
||||
|
||||
|
@ -267,7 +267,7 @@ func (n *NodeProcessor) Tail() string {
|
|||
|
||||
// Active returns whether this node processor is for a currently active node.
|
||||
func (n *NodeProcessor) Active() (bool, error) {
|
||||
nio, err := n.meta.Node(n.nodeID)
|
||||
nio, err := n.meta.DataNode(n.nodeID)
|
||||
if err != nil {
|
||||
n.Logger.Printf("failed to determine if node %d is active: %s", n.nodeID, err.Error())
|
||||
return false, err
|
||||
|
|
|
@ -7,8 +7,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
type fakeShardWriter struct {
|
||||
|
@ -23,7 +23,7 @@ type fakeMetaStore struct {
|
|||
NodeFn func(nodeID uint64) (*meta.NodeInfo, error)
|
||||
}
|
||||
|
||||
func (f *fakeMetaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
|
||||
func (f *fakeMetaStore) DataNode(nodeID uint64) (*meta.NodeInfo, error) {
|
||||
return f.NodeFn(nodeID)
|
||||
}
|
||||
|
||||
|
|
|
@ -13,9 +13,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/monitor"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
// ErrHintedHandoffDisabled is returned when attempting to use a
|
||||
|
@ -43,7 +43,7 @@ type Service struct {
|
|||
cfg Config
|
||||
|
||||
shardWriter shardWriter
|
||||
metastore metaStore
|
||||
MetaClient metaClient
|
||||
|
||||
Monitor interface {
|
||||
RegisterDiagnosticsClient(name string, client monitor.DiagsClient)
|
||||
|
@ -55,12 +55,12 @@ type shardWriter interface {
|
|||
WriteShard(shardID, ownerID uint64, points []models.Point) error
|
||||
}
|
||||
|
||||
type metaStore interface {
|
||||
Node(id uint64) (ni *meta.NodeInfo, err error)
|
||||
type metaClient interface {
|
||||
DataNode(id uint64) (ni *meta.NodeInfo, err error)
|
||||
}
|
||||
|
||||
// NewService returns a new instance of Service.
|
||||
func NewService(c Config, w shardWriter, m metaStore) *Service {
|
||||
func NewService(c Config, w shardWriter, m metaClient) *Service {
|
||||
key := strings.Join([]string{"hh", c.Dir}, ":")
|
||||
tags := map[string]string{"path": c.Dir}
|
||||
|
||||
|
@ -71,7 +71,7 @@ func NewService(c Config, w shardWriter, m metaStore) *Service {
|
|||
statMap: influxdb.NewStatistics(key, "hh", tags),
|
||||
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
|
||||
shardWriter: w,
|
||||
metastore: m,
|
||||
MetaClient: m,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ func (s *Service) Open() error {
|
|||
continue
|
||||
}
|
||||
|
||||
n := NewNodeProcessor(nodeID, s.pathforNode(nodeID), s.shardWriter, s.metastore)
|
||||
n := NewNodeProcessor(nodeID, s.pathforNode(nodeID), s.shardWriter, s.MetaClient)
|
||||
if err := n.Open(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -168,7 +168,7 @@ func (s *Service) WriteShard(shardID, ownerID uint64, points []models.Point) err
|
|||
|
||||
processor, ok = s.processors[ownerID]
|
||||
if !ok {
|
||||
processor = NewNodeProcessor(ownerID, s.pathforNode(ownerID), s.shardWriter, s.metastore)
|
||||
processor = NewNodeProcessor(ownerID, s.pathforNode(ownerID), s.shardWriter, s.MetaClient)
|
||||
if err := processor.Open(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
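Review note: the hinted-handoff service now exposes `MetaClient` and the node processor resolves ownership through `DataNode(id)`. A sketch of the wiring only, using fakes for the two dependencies `NewService` takes; it assumes the usual Open/Close lifecycle and the interface shapes shown in this hunk, and the fake names are illustrative:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/influxdb/influxdb/models"
	"github.com/influxdb/influxdb/services/hh"
	"github.com/influxdb/influxdb/services/meta"
)

// fakeShardWriter and fakeMetaClient are stand-ins for the two dependencies
// NewService now takes: a cluster shard writer and a meta client that can
// resolve a node ID to a data node.
type fakeShardWriter struct{}

func (fakeShardWriter) WriteShard(shardID, ownerID uint64, points []models.Point) error {
	return nil // pretend every replayed write succeeds
}

type fakeMetaClient struct{}

func (fakeMetaClient) DataNode(id uint64) (*meta.NodeInfo, error) {
	// Report the node as known so NodeProcessor.Active() treats it as live.
	return &meta.NodeInfo{ID: id}, nil
}

func main() {
	dir, err := ioutil.TempDir("", "hh")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	c := hh.NewConfig()
	c.Enabled = true
	c.Dir = dir

	s := hh.NewService(c, fakeShardWriter{}, fakeMetaClient{})
	if err := s.Open(); err != nil {
		log.Fatalf("open hinted handoff: %v", err)
	}
	defer s.Close()
}
```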
|
@ -22,9 +22,9 @@ import (
|
|||
"github.com/influxdb/influxdb/client"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/continuous_querier"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/uuid"
|
||||
)
|
||||
|
||||
|
@ -57,11 +57,11 @@ type Handler struct {
|
|||
requireAuthentication bool
|
||||
Version string
|
||||
|
||||
MetaStore interface {
|
||||
WaitForLeader(timeout time.Duration) error
|
||||
MetaClient interface {
|
||||
Database(name string) (*meta.DatabaseInfo, error)
|
||||
Authenticate(username, password string) (ui *meta.UserInfo, err error)
|
||||
Users() ([]meta.UserInfo, error)
|
||||
Users() []meta.UserInfo
|
||||
Ping(checkAllMetaServers bool) error
|
||||
}
|
||||
|
||||
QueryExecutor interface {
|
||||
|
@ -414,7 +414,7 @@ func (h *Handler) serveWriteJSON(w http.ResponseWriter, r *http.Request, body []
|
|||
return
|
||||
}
|
||||
|
||||
if di, err := h.MetaStore.Database(bp.Database); err != nil {
|
||||
if di, err := h.MetaClient.Database(bp.Database); err != nil {
|
||||
resultError(w, influxql.Result{Err: fmt.Errorf("metastore database error: %s", err)}, http.StatusInternalServerError)
|
||||
return
|
||||
} else if di == nil {
|
||||
|
@ -501,7 +501,7 @@ func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []
|
|||
return
|
||||
}
|
||||
|
||||
if di, err := h.MetaStore.Database(database); err != nil {
|
||||
if di, err := h.MetaClient.Database(database); err != nil {
|
||||
resultError(w, influxql.Result{Err: fmt.Errorf("metastore database error: %s", err)}, http.StatusInternalServerError)
|
||||
return
|
||||
} else if di == nil {
|
||||
|
@ -566,22 +566,13 @@ func (h *Handler) serveOptions(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
// servePing returns a simple response to let the client know the server is running.
|
||||
func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
wfl := q.Get("wait_for_leader")
|
||||
h.statMap.Add(statPingRequest, 1)
|
||||
|
||||
if wfl != "" {
|
||||
d, err := time.ParseDuration(wfl)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := h.MetaStore.WaitForLeader(d); err != nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
if err := h.MetaClient.Ping(false); err != nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
h.statMap.Add(statPingRequest, 1)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
|
@ -713,11 +704,7 @@ func authenticate(inner func(http.ResponseWriter, *http.Request, *meta.UserInfo)
|
|||
var user *meta.UserInfo
|
||||
|
||||
// Retrieve user list.
|
||||
uis, err := h.MetaStore.Users()
|
||||
if err != nil {
|
||||
httpError(w, err.Error(), false, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
uis := h.MetaClient.Users()
|
||||
|
||||
// TODO corylanou: never allow this in the future without users
|
||||
if requireAuthentication && len(uis) > 0 {
|
||||
|
@ -733,7 +720,7 @@ func authenticate(inner func(http.ResponseWriter, *http.Request, *meta.UserInfo)
|
|||
return
|
||||
}
|
||||
|
||||
user, err = h.MetaStore.Authenticate(username, password)
|
||||
user, err = h.MetaClient.Authenticate(username, password)
|
||||
if err != nil {
|
||||
h.statMap.Add(statAuthFail, 1)
|
||||
httpError(w, err.Error(), false, http.StatusUnauthorized)
|
||||
|
|
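Review note: `/ping` no longer parses `wait_for_leader`; it just calls `MetaClient.Ping(false)`, and authentication uses a `Users()` that can no longer fail. A sketch of the simplified ping handler shape, detached from the real `Handler` type; the helper names and port are illustrative only:

```go
package main

import (
	"log"
	"net/http"
)

// pinger is all the ping endpoint needs from the meta layer now: one health
// check instead of a leader wait with a client-supplied timeout.
type pinger interface {
	Ping(checkAllMetaServers bool) error
}

type okPinger struct{}

func (okPinger) Ping(bool) error { return nil }

// servePing mirrors the simplified handler: 204 when the meta client answers,
// 503 when it does not.
func servePing(mc pinger) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := mc.Ping(false); err != nil {
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}
}

func main() {
	http.Handle("/ping", servePing(okPinger{}))
	log.Fatal(http.ListenAndServe(":8086", nil)) // illustrative port
}
```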
|
@ -16,9 +16,9 @@ import (
|
|||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/client"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/httpd"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
|
@ -318,51 +318,6 @@ func TestHandler_Ping(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// Ensure the handler handles ping requests correctly, when waiting for leader.
|
||||
func TestHandler_PingWaitForLeader(t *testing.T) {
|
||||
h := NewHandler(false)
|
||||
w := httptest.NewRecorder()
|
||||
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1s", nil))
|
||||
if w.Code != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=1s", nil))
|
||||
if w.Code != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the handler handles ping requests correctly, when timeout expires waiting for leader.
|
||||
func TestHandler_PingWaitForLeaderTimeout(t *testing.T) {
|
||||
h := NewHandler(false)
|
||||
h.MetaStore.WaitForLeaderFn = func(d time.Duration) error {
|
||||
return fmt.Errorf("timeout")
|
||||
}
|
||||
w := httptest.NewRecorder()
|
||||
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1s", nil))
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=1s", nil))
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the handler handles bad ping requests
|
||||
func TestHandler_PingWaitForLeaderBadRequest(t *testing.T) {
|
||||
h := NewHandler(false)
|
||||
w := httptest.NewRecorder()
|
||||
h.ServeHTTP(w, MustNewRequest("GET", "/ping?wait_for_leader=1xxx", nil))
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
h.ServeHTTP(w, MustNewRequest("HEAD", "/ping?wait_for_leader=abc", nil))
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Fatalf("unexpected status: %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure write endpoint can handle bad requests
|
||||
func TestHandler_HandleBadRequestBody(t *testing.T) {
|
||||
b := bytes.NewReader(make([]byte, 10))
|
||||
|
@ -468,7 +423,7 @@ func TestNormalizeBatchPoints(t *testing.T) {
|
|||
// NewHandler represents a test wrapper for httpd.Handler.
|
||||
type Handler struct {
|
||||
*httpd.Handler
|
||||
MetaStore HandlerMetaStore
|
||||
MetaClient HandlerMetaStore
|
||||
QueryExecutor HandlerQueryExecutor
|
||||
TSDBStore HandlerTSDBStore
|
||||
}
|
||||
|
@ -479,26 +434,26 @@ func NewHandler(requireAuthentication bool) *Handler {
|
|||
h := &Handler{
|
||||
Handler: httpd.NewHandler(requireAuthentication, true, false, statMap),
|
||||
}
|
||||
h.Handler.MetaStore = &h.MetaStore
|
||||
h.Handler.MetaClient = &h.MetaClient
|
||||
h.Handler.QueryExecutor = &h.QueryExecutor
|
||||
h.Handler.Version = "0.0.0"
|
||||
return h
|
||||
}
|
||||
|
||||
// HandlerMetaStore is a mock implementation of Handler.MetaStore.
|
||||
// HandlerMetaStore is a mock implementation of Handler.MetaClient.
|
||||
type HandlerMetaStore struct {
|
||||
WaitForLeaderFn func(d time.Duration) error
|
||||
DatabaseFn func(name string) (*meta.DatabaseInfo, error)
|
||||
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
|
||||
UsersFn func() ([]meta.UserInfo, error)
|
||||
PingFn func(d time.Duration) error
|
||||
DatabaseFn func(name string) (*meta.DatabaseInfo, error)
|
||||
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
|
||||
UsersFn func() []meta.UserInfo
|
||||
}
|
||||
|
||||
func (s *HandlerMetaStore) WaitForLeader(d time.Duration) error {
|
||||
if s.WaitForLeaderFn == nil {
|
||||
func (s *HandlerMetaStore) Ping(b bool) error {
|
||||
if s.PingFn == nil {
|
||||
// Default behaviour is to assume there is a leader.
|
||||
return nil
|
||||
}
|
||||
return s.WaitForLeaderFn(d)
|
||||
return s.PingFn(b)
|
||||
}
|
||||
|
||||
func (s *HandlerMetaStore) Database(name string) (*meta.DatabaseInfo, error) {
|
||||
|
@ -509,7 +464,7 @@ func (s *HandlerMetaStore) Authenticate(username, password string) (ui *meta.Use
|
|||
return s.AuthenticateFn(username, password)
|
||||
}
|
||||
|
||||
func (s *HandlerMetaStore) Users() ([]meta.UserInfo, error) {
|
||||
func (s *HandlerMetaStore) Users() []meta.UserInfo {
|
||||
return s.UsersFn()
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
)
|
||||
|
@ -97,6 +98,19 @@ func (s *Service) Open() error {
|
|||
s.ln = listener
|
||||
}
|
||||
|
||||
// wait for the listeners to start
|
||||
timeout := time.Now().Add(time.Second)
|
||||
for {
|
||||
if s.ln.Addr() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if time.Now().After(timeout) {
|
||||
return fmt.Errorf("unable to open without http listener running")
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Begin listening for requests in a separate goroutine.
|
||||
go s.serve()
|
||||
return nil
|
||||
|
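Review note: `Open()` now blocks for up to a second until the listener reports a non-nil `Addr()` before the serve goroutine starts. A tiny standalone sketch of that bounded-poll pattern; the helper name is mine, not the service's:

```go
package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond every 10ms until it returns true or the deadline
// passes; this mirrors the listener-readiness wait added to Open().
func waitUntil(cond func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !cond() {
		if time.Now().After(deadline) {
			return fmt.Errorf("condition not met within %s", timeout)
		}
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}

func main() {
	ready := make(chan struct{})
	go func() { time.Sleep(50 * time.Millisecond); close(ready) }()

	err := waitUntil(func() bool {
		select {
		case <-ready:
			return true
		default:
			return false
		}
	}, time.Second)
	fmt.Println("listener ready:", err == nil)
}
```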
|
File diff suppressed because it is too large
|
@ -0,0 +1,122 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/toml"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultEnabled is the default state for the meta service to run
|
||||
DefaultEnabled = true
|
||||
|
||||
// DefaultHostname is the default hostname if one is not provided.
|
||||
DefaultHostname = "localhost"
|
||||
|
||||
// DefaultRaftBindAddress is the default address to bind to.
|
||||
DefaultRaftBindAddress = ":8088"
|
||||
|
||||
// DefaultHTTPBindAddress is the default address to bind the API to.
|
||||
DefaultHTTPBindAddress = ":8091"
|
||||
|
||||
// DefaultHeartbeatTimeout is the default heartbeat timeout for the store.
|
||||
DefaultHeartbeatTimeout = 1000 * time.Millisecond
|
||||
|
||||
// DefaultElectionTimeout is the default election timeout for the store.
|
||||
DefaultElectionTimeout = 1000 * time.Millisecond
|
||||
|
||||
// DefaultLeaderLeaseTimeout is the default leader lease for the store.
|
||||
DefaultLeaderLeaseTimeout = 500 * time.Millisecond
|
||||
|
||||
// DefaultCommitTimeout is the default commit timeout for the store.
|
||||
DefaultCommitTimeout = 50 * time.Millisecond
|
||||
|
||||
// DefaultRaftPromotionEnabled is the default for auto promoting a node to a raft node when needed
|
||||
DefaultRaftPromotionEnabled = true
|
||||
|
||||
// DefaultLeaseDuration is the default duration for leases.
|
||||
DefaultLeaseDuration = 60 * time.Second
|
||||
|
||||
// DefaultLoggingEnabled determines if log messages are printed for the meta service
|
||||
DefaultLoggingEnabled = true
|
||||
)
|
||||
|
||||
// Config represents the meta configuration.
|
||||
type Config struct {
|
||||
Enabled bool `toml:"enabled"`
|
||||
Dir string `toml:"dir"`
|
||||
|
||||
// this is deprecated. Should use the address from run/config.go
|
||||
BindAddress string `toml:"bind-address"`
|
||||
|
||||
// HTTPBindAddress is the bind address for the metaservice HTTP API
|
||||
HTTPBindAddress string `toml:"http-bind-address"`
|
||||
HTTPSEnabled bool `toml:"https-enabled"`
|
||||
HTTPSCertificate string `toml:"https-certificate"`
|
||||
|
||||
// JoinPeers if specified gives other metastore servers to join this server to the cluster
|
||||
JoinPeers []string `toml:"-"`
|
||||
RetentionAutoCreate bool `toml:"retention-autocreate"`
|
||||
ElectionTimeout toml.Duration `toml:"election-timeout"`
|
||||
HeartbeatTimeout toml.Duration `toml:"heartbeat-timeout"`
|
||||
LeaderLeaseTimeout toml.Duration `toml:"leader-lease-timeout"`
|
||||
CommitTimeout toml.Duration `toml:"commit-timeout"`
|
||||
ClusterTracing bool `toml:"cluster-tracing"`
|
||||
RaftPromotionEnabled bool `toml:"raft-promotion-enabled"`
|
||||
LoggingEnabled bool `toml:"logging-enabled"`
|
||||
PprofEnabled bool `toml:"pprof-enabled"`
|
||||
|
||||
LeaseDuration toml.Duration `toml:"lease-duration"`
|
||||
}
|
||||
|
||||
// NewConfig builds a new configuration with default values.
|
||||
func NewConfig() *Config {
|
||||
return &Config{
|
||||
Enabled: true, // enabled by default
|
||||
BindAddress: DefaultRaftBindAddress,
|
||||
HTTPBindAddress: DefaultHTTPBindAddress,
|
||||
RetentionAutoCreate: true,
|
||||
ElectionTimeout: toml.Duration(DefaultElectionTimeout),
|
||||
HeartbeatTimeout: toml.Duration(DefaultHeartbeatTimeout),
|
||||
LeaderLeaseTimeout: toml.Duration(DefaultLeaderLeaseTimeout),
|
||||
CommitTimeout: toml.Duration(DefaultCommitTimeout),
|
||||
RaftPromotionEnabled: DefaultRaftPromotionEnabled,
|
||||
LeaseDuration: toml.Duration(DefaultLeaseDuration),
|
||||
LoggingEnabled: DefaultLoggingEnabled,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
if c.Enabled && c.Dir == "" {
|
||||
return errors.New("Meta.Dir must be specified")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) defaultHost(addr string) string {
|
||||
host, port, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return addr
|
||||
}
|
||||
|
||||
if host == "" {
|
||||
return net.JoinHostPort(DefaultHostname, port)
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// DefaultedBindAddress returns the BindAddress normalized with the
|
||||
// host's name or "localhost" if that could not be determined. If
|
||||
// the BindAddress already has a hostname, BindAddress is returned.
|
||||
func (c *Config) DefaultedBindAddress() string {
|
||||
return c.defaultHost(c.BindAddress)
|
||||
}
|
||||
|
||||
// DefaultedHTTPBindAddress returns the HTTPBindAddress normalized with the
|
||||
// host's name or "localhost" if that could not be determined. If
|
||||
// the HTTPBindAddress already has a hostname, HTTPBindAddress is returned.
|
||||
func (c *Config) DefaultedHTTPBindAddress() string {
|
||||
return c.defaultHost(c.HTTPBindAddress)
|
||||
}
|
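Review note: the new `services/meta` config carries both the raft and HTTP bind addresses plus a lease duration, and `Validate()` requires a `Dir` when the service is enabled. A short sketch using only names defined in this file; the directory path is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/services/meta"
)

func main() {
	c := meta.NewConfig()
	c.Dir = "/var/lib/influxdb/meta" // illustrative path; Validate rejects an empty Dir

	if err := c.Validate(); err != nil {
		log.Fatalf("invalid meta config: %v", err)
	}

	// The Defaulted* helpers fill in "localhost" when the bind address has no host.
	fmt.Println("raft:", c.DefaultedBindAddress())
	fmt.Println("http:", c.DefaultedHTTPBindAddress())
}
```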
|
@ -5,13 +5,14 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
func TestConfig_Parse(t *testing.T) {
|
||||
// Parse configuration.
|
||||
var c meta.Config
|
||||
if _, err := toml.Decode(`
|
||||
enabled = false
|
||||
dir = "/tmp/foo"
|
||||
election-timeout = "10s"
|
||||
heartbeat-timeout = "20s"
|
||||
|
@ -24,7 +25,9 @@ logging-enabled = false
|
|||
}
|
||||
|
||||
// Validate configuration.
|
||||
if c.Dir != "/tmp/foo" {
|
||||
if c.Enabled == true {
|
||||
t.Fatalf("unexpected enabled: %v", c.Enabled)
|
||||
} else if c.Dir != "/tmp/foo" {
|
||||
t.Fatalf("unexpected dir: %s", c.Dir)
|
||||
} else if time.Duration(c.ElectionTimeout) != 10*time.Second {
|
||||
t.Fatalf("unexpected election timeout: %v", c.ElectionTimeout)
|
|
@ -1,13 +1,14 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta/internal"
|
||||
"github.com/influxdb/influxdb/services/meta/internal"
|
||||
)
|
||||
|
||||
//go:generate protoc --gogo_out=. internal/meta.proto
|
||||
|
@ -28,7 +29,8 @@ type Data struct {
|
|||
Term uint64 // associated raft term
|
||||
Index uint64 // associated raft index
|
||||
ClusterID uint64
|
||||
Nodes []NodeInfo
|
||||
MetaNodes []NodeInfo
|
||||
DataNodes []NodeInfo
|
||||
Databases []DatabaseInfo
|
||||
Users []UserInfo
|
||||
|
||||
|
@ -37,78 +39,42 @@ type Data struct {
|
|||
MaxShardID uint64
|
||||
}
|
||||
|
||||
// Node returns a node by id.
|
||||
func (data *Data) Node(id uint64) *NodeInfo {
|
||||
for i := range data.Nodes {
|
||||
if data.Nodes[i].ID == id {
|
||||
return &data.Nodes[i]
|
||||
// DataNode returns a node by id.
|
||||
func (data *Data) DataNode(id uint64) *NodeInfo {
|
||||
for i := range data.DataNodes {
|
||||
if data.DataNodes[i].ID == id {
|
||||
return &data.DataNodes[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeByHost returns a node by hostname.
|
||||
func (data *Data) NodeByHost(host string) *NodeInfo {
|
||||
for i := range data.Nodes {
|
||||
if data.Nodes[i].Host == host {
|
||||
return &data.Nodes[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateNode adds a node to the metadata.
|
||||
func (data *Data) CreateNode(host string) error {
|
||||
// CreateDataNode adds a node to the metadata.
|
||||
func (data *Data) CreateDataNode(host, tcpHost string) error {
|
||||
// Ensure a node with the same host doesn't already exist.
|
||||
if data.NodeByHost(host) != nil {
|
||||
return ErrNodeExists
|
||||
for _, n := range data.DataNodes {
|
||||
if n.Host == host {
|
||||
return ErrNodeExists
|
||||
}
|
||||
}
|
||||
|
||||
// Append new node.
|
||||
data.MaxNodeID++
|
||||
data.Nodes = append(data.Nodes, NodeInfo{
|
||||
ID: data.MaxNodeID,
|
||||
Host: host,
|
||||
data.DataNodes = append(data.DataNodes, NodeInfo{
|
||||
ID: data.MaxNodeID,
|
||||
Host: host,
|
||||
TCPHost: tcpHost,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteNode removes a node from the metadata.
|
||||
func (data *Data) DeleteNode(id uint64, force bool) error {
|
||||
func (data *Data) DeleteDataNode(id uint64) error {
|
||||
// Node has to be larger than 0 to be real
|
||||
if id == 0 {
|
||||
return ErrNodeIDRequired
|
||||
}
|
||||
// Is this a valid node?
|
||||
nodeInfo := data.Node(id)
|
||||
if nodeInfo == nil {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
// Am I the only node? If so, nothing to do
|
||||
if len(data.Nodes) == 1 {
|
||||
return ErrNodeUnableToDropFinalNode
|
||||
}
|
||||
|
||||
// Determine if there are any non-replicated nodes and force was not specified
|
||||
if !force {
|
||||
for _, d := range data.Databases {
|
||||
for _, rp := range d.RetentionPolicies {
|
||||
// ignore replicated retention policies
|
||||
if rp.ReplicaN > 1 {
|
||||
continue
|
||||
}
|
||||
for _, sg := range rp.ShardGroups {
|
||||
for _, s := range sg.Shards {
|
||||
if s.OwnedBy(id) && len(s.Owners) == 1 {
|
||||
return ErrShardNotReplicated
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove node id from all shard infos
|
||||
for di, d := range data.Databases {
|
||||
|
@ -131,14 +97,89 @@ func (data *Data) DeleteNode(id uint64, force bool) error {
|
|||
|
||||
// Remove this node from the in memory nodes
|
||||
var nodes []NodeInfo
|
||||
for _, n := range data.Nodes {
|
||||
for _, n := range data.DataNodes {
|
||||
if n.ID == id {
|
||||
continue
|
||||
}
|
||||
nodes = append(nodes, n)
|
||||
}
|
||||
data.Nodes = nodes
|
||||
|
||||
if len(nodes) == len(data.DataNodes) {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
data.DataNodes = nodes
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetaNode returns a node by id.
|
||||
func (data *Data) MetaNode(id uint64) *NodeInfo {
|
||||
for i := range data.MetaNodes {
|
||||
if data.MetaNodes[i].ID == id {
|
||||
return &data.MetaNodes[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateMetaNode will add a new meta node to the metastore
|
||||
func (data *Data) CreateMetaNode(httpAddr, tcpAddr string) error {
|
||||
// Ensure a node with the same host doesn't already exist.
|
||||
for _, n := range data.MetaNodes {
|
||||
if n.Host == httpAddr {
|
||||
return ErrNodeExists
|
||||
}
|
||||
}
|
||||
|
||||
// Append new node.
|
||||
data.MaxNodeID++
|
||||
data.MetaNodes = append(data.MetaNodes, NodeInfo{
|
||||
ID: data.MaxNodeID,
|
||||
Host: httpAddr,
|
||||
TCPHost: tcpAddr,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMetaNode will update the information for the single meta
|
||||
// node or create a new meta node. If there is more than one meta
|
||||
// node already, an error will be returned
|
||||
func (data *Data) SetMetaNode(httpAddr, tcpAddr string) error {
|
||||
if len(data.MetaNodes) > 1 {
|
||||
return fmt.Errorf("can't set meta node when there are more than 1 in the metastore")
|
||||
}
|
||||
|
||||
if len(data.MetaNodes) == 0 {
|
||||
return data.CreateMetaNode(httpAddr, tcpAddr)
|
||||
}
|
||||
|
||||
data.MetaNodes[0].Host = httpAddr
|
||||
data.MetaNodes[0].TCPHost = tcpAddr
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteMetaNode will remove the meta node from the store
|
||||
func (data *Data) DeleteMetaNode(id uint64) error {
|
||||
// Node has to be larger than 0 to be real
|
||||
if id == 0 {
|
||||
return ErrNodeIDRequired
|
||||
}
|
||||
|
||||
var nodes []NodeInfo
|
||||
for _, n := range data.MetaNodes {
|
||||
if n.ID == id {
|
||||
continue
|
||||
}
|
||||
nodes = append(nodes, n)
|
||||
}
|
||||
|
||||
if len(nodes) == len(data.MetaNodes) {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
data.MetaNodes = nodes
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -152,13 +193,25 @@ func (data *Data) Database(name string) *DatabaseInfo {
|
|||
return nil
|
||||
}
|
||||
|
||||
// CloneDatabases returns a copy of the databases.
|
||||
func (data *Data) CloneDatabases() []DatabaseInfo {
|
||||
if data.Databases == nil {
|
||||
return nil
|
||||
}
|
||||
dbs := make([]DatabaseInfo, len(data.Databases))
|
||||
for i := range data.Databases {
|
||||
dbs[i] = data.Databases[i].clone()
|
||||
}
|
||||
return dbs
|
||||
}
|
||||
|
||||
// CreateDatabase creates a new database.
|
||||
// Returns an error if name is blank or if a database with the same name already exists.
|
||||
func (data *Data) CreateDatabase(name string) error {
|
||||
if name == "" {
|
||||
return ErrDatabaseNameRequired
|
||||
} else if data.Database(name) != nil {
|
||||
return ErrDatabaseExists
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append new node.
|
||||
|
@ -207,8 +260,12 @@ func (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInf
|
|||
di := data.Database(database)
|
||||
if di == nil {
|
||||
return influxdb.ErrDatabaseNotFound(database)
|
||||
} else if di.RetentionPolicy(rpi.Name) != nil {
|
||||
return ErrRetentionPolicyExists
|
||||
} else if rp := di.RetentionPolicy(rpi.Name); rp != nil {
|
||||
// RP with that name already exists. Make sure they're the same.
|
||||
if rp.ReplicaN != rpi.ReplicaN || rp.Duration != rpi.Duration {
|
||||
return ErrRetentionPolicyExists
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append new policy.
|
||||
|
@ -230,21 +287,33 @@ func (data *Data) DropRetentionPolicy(database, name string) error {
|
|||
return influxdb.ErrDatabaseNotFound(database)
|
||||
}
|
||||
|
||||
// Prohibit dropping the default retention policy.
|
||||
if di.DefaultRetentionPolicy == name {
|
||||
return ErrRetentionPolicyDefault
|
||||
}
|
||||
|
||||
// Remove from list.
|
||||
for i := range di.RetentionPolicies {
|
||||
if di.RetentionPolicies[i].Name == name {
|
||||
di.RetentionPolicies = append(di.RetentionPolicies[:i], di.RetentionPolicies[i+1:]...)
|
||||
return nil
|
||||
break
|
||||
}
|
||||
}
|
||||
return influxdb.ErrRetentionPolicyNotFound(name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RetentionPolicyUpdate represents retention policy fields to be updated.
|
||||
type RetentionPolicyUpdate struct {
|
||||
Name *string
|
||||
Duration *time.Duration
|
||||
ReplicaN *int
|
||||
}
|
||||
|
||||
// SetName sets the RetentionPolicyUpdate.Name
|
||||
func (rpu *RetentionPolicyUpdate) SetName(v string) { rpu.Name = &v }
|
||||
|
||||
// SetDuration sets the RetentionPolicyUpdate.Duration
|
||||
func (rpu *RetentionPolicyUpdate) SetDuration(v time.Duration) { rpu.Duration = &v }
|
||||
|
||||
// SetReplicaN sets the RetentionPolicyUpdate.ReplicaN
|
||||
func (rpu *RetentionPolicyUpdate) SetReplicaN(v int) { rpu.ReplicaN = &v }
|
||||
|
||||
// UpdateRetentionPolicy updates an existing retention policy.
|
||||
func (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error {
|
||||
// Find database.
|
||||
|
@ -355,8 +424,8 @@ func (data *Data) ShardGroupByTimestamp(database, policy string, timestamp time.
|
|||
// CreateShardGroup creates a shard group on a database and policy for a given timestamp.
|
||||
func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) error {
|
||||
// Ensure there are nodes in the metadata.
|
||||
if len(data.Nodes) == 0 {
|
||||
return ErrNodesRequired
|
||||
if len(data.DataNodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find retention policy.
|
||||
|
@ -369,21 +438,21 @@ func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time)
|
|||
|
||||
// Verify that shard group doesn't already exist for this timestamp.
|
||||
if rpi.ShardGroupByTimestamp(timestamp) != nil {
|
||||
return ErrShardGroupExists
|
||||
return nil
|
||||
}
|
||||
|
||||
// Require at least one replica but no more replicas than nodes.
|
||||
replicaN := rpi.ReplicaN
|
||||
if replicaN == 0 {
|
||||
replicaN = 1
|
||||
} else if replicaN > len(data.Nodes) {
|
||||
replicaN = len(data.Nodes)
|
||||
} else if replicaN > len(data.DataNodes) {
|
||||
replicaN = len(data.DataNodes)
|
||||
}
|
||||
|
||||
// Determine shard count by node count divided by replication factor.
|
||||
// This will ensure nodes will get distributed across nodes evenly and
|
||||
// replicated the correct number of times.
|
||||
shardN := len(data.Nodes) / replicaN
|
||||
shardN := len(data.DataNodes) / replicaN
|
||||
|
||||
// Create the shard group.
|
||||
data.MaxShardGroupID++
|
||||
|
@ -401,11 +470,11 @@ func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time)
|
|||
|
||||
// Assign data nodes to shards via round robin.
|
||||
// Start from a repeatably "random" place in the node list.
|
||||
nodeIndex := int(data.Index % uint64(len(data.Nodes)))
|
||||
nodeIndex := int(data.Index % uint64(len(data.DataNodes)))
|
||||
for i := range sgi.Shards {
|
||||
si := &sgi.Shards[i]
|
||||
for j := 0; j < replicaN; j++ {
|
||||
nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID
|
||||
nodeID := data.DataNodes[nodeIndex%len(data.DataNodes)].ID
|
||||
si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID})
|
||||
nodeIndex++
|
||||
}
|
||||
|
@ -632,10 +701,17 @@ func (data *Data) Clone() *Data {
|
|||
other := *data
|
||||
|
||||
// Copy nodes.
|
||||
if data.Nodes != nil {
|
||||
other.Nodes = make([]NodeInfo, len(data.Nodes))
|
||||
for i := range data.Nodes {
|
||||
other.Nodes[i] = data.Nodes[i].clone()
|
||||
if data.DataNodes != nil {
|
||||
other.DataNodes = make([]NodeInfo, len(data.DataNodes))
|
||||
for i := range data.DataNodes {
|
||||
other.DataNodes[i] = data.DataNodes[i].clone()
|
||||
}
|
||||
}
|
||||
|
||||
if data.MetaNodes != nil {
|
||||
other.MetaNodes = make([]NodeInfo, len(data.MetaNodes))
|
||||
for i := range data.MetaNodes {
|
||||
other.MetaNodes[i] = data.MetaNodes[i].clone()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -670,9 +746,14 @@ func (data *Data) marshal() *internal.Data {
|
|||
MaxShardID: proto.Uint64(data.MaxShardID),
|
||||
}
|
||||
|
||||
pb.Nodes = make([]*internal.NodeInfo, len(data.Nodes))
|
||||
for i := range data.Nodes {
|
||||
pb.Nodes[i] = data.Nodes[i].marshal()
|
||||
pb.DataNodes = make([]*internal.NodeInfo, len(data.DataNodes))
|
||||
for i := range data.DataNodes {
|
||||
pb.DataNodes[i] = data.DataNodes[i].marshal()
|
||||
}
|
||||
|
||||
pb.MetaNodes = make([]*internal.NodeInfo, len(data.MetaNodes))
|
||||
for i := range data.MetaNodes {
|
||||
pb.MetaNodes[i] = data.MetaNodes[i].marshal()
|
||||
}
|
||||
|
||||
pb.Databases = make([]*internal.DatabaseInfo, len(data.Databases))
|
||||
|
@ -698,9 +779,22 @@ func (data *Data) unmarshal(pb *internal.Data) {
|
|||
data.MaxShardGroupID = pb.GetMaxShardGroupID()
|
||||
data.MaxShardID = pb.GetMaxShardID()
|
||||
|
||||
data.Nodes = make([]NodeInfo, len(pb.GetNodes()))
|
||||
for i, x := range pb.GetNodes() {
|
||||
data.Nodes[i].unmarshal(x)
|
||||
// TODO: Nodes is deprecated. This is being left here to make migration from 0.9.x to 0.10.0 possible
|
||||
if len(pb.GetNodes()) > 0 {
|
||||
data.DataNodes = make([]NodeInfo, len(pb.GetNodes()))
|
||||
for i, x := range pb.GetNodes() {
|
||||
data.DataNodes[i].unmarshal(x)
|
||||
}
|
||||
} else {
|
||||
data.DataNodes = make([]NodeInfo, len(pb.GetDataNodes()))
|
||||
for i, x := range pb.GetDataNodes() {
|
||||
data.DataNodes[i].unmarshal(x)
|
||||
}
|
||||
}
|
||||
|
||||
data.MetaNodes = make([]NodeInfo, len(pb.GetMetaNodes()))
|
||||
for i, x := range pb.GetMetaNodes() {
|
||||
data.MetaNodes[i].unmarshal(x)
|
||||
}
|
||||
|
||||
data.Databases = make([]DatabaseInfo, len(pb.GetDatabases()))
|
||||
|
@ -731,8 +825,9 @@ func (data *Data) UnmarshalBinary(buf []byte) error {
|
|||
|
||||
// NodeInfo represents information about a single node in the cluster.
|
||||
type NodeInfo struct {
|
||||
ID uint64
|
||||
Host string
|
||||
ID uint64
|
||||
Host string
|
||||
TCPHost string
|
||||
}
|
||||
|
||||
// clone returns a deep copy of ni.
|
||||
|
@ -743,6 +838,7 @@ func (ni NodeInfo) marshal() *internal.NodeInfo {
|
|||
pb := &internal.NodeInfo{}
|
||||
pb.ID = proto.Uint64(ni.ID)
|
||||
pb.Host = proto.String(ni.Host)
|
||||
pb.TCPHost = proto.String(ni.TCPHost)
|
||||
return pb
|
||||
}
|
||||
|
||||
|
@ -750,6 +846,7 @@ func (ni NodeInfo) marshal() *internal.NodeInfo {
|
|||
func (ni *NodeInfo) unmarshal(pb *internal.NodeInfo) {
|
||||
ni.ID = pb.GetID()
|
||||
ni.Host = pb.GetHost()
|
||||
ni.TCPHost = pb.GetTCPHost()
|
||||
}
|
||||
|
||||
// NodeInfos is a slice of NodeInfo used for sorting
|
||||
|
@ -930,6 +1027,11 @@ func (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo {
|
|||
pb.ShardGroups[i] = sgi.marshal()
|
||||
}
|
||||
|
||||
pb.Subscriptions = make([]*internal.SubscriptionInfo, len(rpi.Subscriptions))
|
||||
for i, sub := range rpi.Subscriptions {
|
||||
pb.Subscriptions[i] = sub.marshal()
|
||||
}
|
||||
|
||||
return pb
|
||||
}
|
||||
|
|
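Review note: `Data` now tracks `MetaNodes` and `DataNodes` separately, `CreateDataNode`/`CreateMetaNode` deduplicate by host, and `unmarshal` migrates the deprecated `Nodes` field into `DataNodes` for 0.9.x snapshots. A sketch exercising the exported node bookkeeping, assuming the `services/meta` import path from this PR; the host strings are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/influxdb/services/meta"
)

func main() {
	var data meta.Data

	// Data nodes are deduplicated by HTTP host; the second call is rejected.
	if err := data.CreateDataNode("node1:8086", "node1:8088"); err != nil {
		log.Fatal(err)
	}
	if err := data.CreateDataNode("node1:8086", "node1:8088"); err != nil {
		fmt.Println("duplicate data node rejected:", err)
	}

	// SetMetaNode either creates the single meta node or updates it in place.
	if err := data.SetMetaNode("node1:8091", "node1:8089"); err != nil {
		log.Fatal(err)
	}

	fmt.Println("data nodes:", len(data.DataNodes), "meta nodes:", len(data.MetaNodes))
}
```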
@ -7,120 +7,108 @@ import (
|
|||
|
||||
var (
|
||||
// ErrStoreOpen is returned when opening an already open store.
|
||||
ErrStoreOpen = newError("store already open")
|
||||
ErrStoreOpen = errors.New("store already open")
|
||||
|
||||
// ErrStoreClosed is returned when closing an already closed store.
|
||||
ErrStoreClosed = newError("raft store already closed")
|
||||
ErrStoreClosed = errors.New("raft store already closed")
|
||||
|
||||
// ErrTooManyPeers is returned when more than 3 peers are used.
|
||||
ErrTooManyPeers = newError("too many peers; influxdb v0.9.0 is limited to 3 nodes in a cluster")
|
||||
ErrTooManyPeers = errors.New("too many peers; influxdb v0.9.0 is limited to 3 nodes in a cluster")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNodeExists is returned when creating an already existing node.
|
||||
ErrNodeExists = newError("node already exists")
|
||||
ErrNodeExists = errors.New("node already exists")
|
||||
|
||||
// ErrNodeNotFound is returned when mutating a node that doesn't exist.
|
||||
ErrNodeNotFound = newError("node not found")
|
||||
ErrNodeNotFound = errors.New("node not found")
|
||||
|
||||
// ErrNodesRequired is returned when at least one node is required for an operation.
|
||||
// This occurs when creating a shard group.
|
||||
ErrNodesRequired = newError("at least one node required")
|
||||
ErrNodesRequired = errors.New("at least one node required")
|
||||
|
||||
// ErrNodeIDRequired is returned when using a zero node id.
|
||||
ErrNodeIDRequired = newError("node id must be greater than 0")
|
||||
ErrNodeIDRequired = errors.New("node id must be greater than 0")
|
||||
|
||||
// ErrNodeUnableToDropFinalNode is returned if the node being dropped is the last
|
||||
// node in the cluster
|
||||
ErrNodeUnableToDropFinalNode = newError("unable to drop the final node in a cluster")
|
||||
ErrNodeUnableToDropFinalNode = errors.New("unable to drop the final node in a cluster")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDatabaseExists is returned when creating an already existing database.
|
||||
ErrDatabaseExists = newError("database already exists")
|
||||
ErrDatabaseExists = errors.New("database already exists")
|
||||
|
||||
// ErrDatabaseNameRequired is returned when creating a database without a name.
|
||||
ErrDatabaseNameRequired = newError("database name required")
|
||||
ErrDatabaseNameRequired = errors.New("database name required")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrRetentionPolicyExists is returned when creating an already existing policy.
|
||||
ErrRetentionPolicyExists = newError("retention policy already exists")
|
||||
ErrRetentionPolicyExists = errors.New("retention policy already exists")
|
||||
|
||||
// ErrRetentionPolicyDefault is returned when attempting a prohibited operation
|
||||
// on a default retention policy.
|
||||
ErrRetentionPolicyDefault = newError("retention policy is default")
|
||||
ErrRetentionPolicyDefault = errors.New("retention policy is default")
|
||||
|
||||
// ErrRetentionPolicyNameRequired is returned when creating a policy without a name.
|
||||
ErrRetentionPolicyNameRequired = newError("retention policy name required")
|
||||
ErrRetentionPolicyNameRequired = errors.New("retention policy name required")
|
||||
|
||||
// ErrRetentionPolicyNameExists is returned when renaming a policy to
|
||||
// the same name as another existing policy.
|
||||
ErrRetentionPolicyNameExists = newError("retention policy name already exists")
|
||||
ErrRetentionPolicyNameExists = errors.New("retention policy name already exists")
|
||||
|
||||
// ErrRetentionPolicyDurationTooLow is returned when updating a retention
|
||||
// policy that has a duration lower than the allowed minimum.
|
||||
ErrRetentionPolicyDurationTooLow = newError(fmt.Sprintf("retention policy duration must be at least %s",
|
||||
ErrRetentionPolicyDurationTooLow = errors.New(fmt.Sprintf("retention policy duration must be at least %s",
|
||||
MinRetentionPolicyDuration))
|
||||
|
||||
// ErrReplicationFactorTooLow is returned when the replication factor is not in an
|
||||
// acceptable range.
|
||||
ErrReplicationFactorTooLow = newError("replication factor must be greater than 0")
|
||||
ErrReplicationFactorTooLow = errors.New("replication factor must be greater than 0")
|
||||
|
||||
// ErrRetentionPolicyNotFound is returned when an expected retention policy can't be found.
|
||||
ErrRetentionPolicyNotFound = errors.New("retention policy not found")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrShardGroupExists is returned when creating an already existing shard group.
|
||||
ErrShardGroupExists = newError("shard group already exists")
|
||||
ErrShardGroupExists = errors.New("shard group already exists")
|
||||
|
||||
// ErrShardGroupNotFound is returned when mutating a shard group that doesn't exist.
|
||||
ErrShardGroupNotFound = newError("shard group not found")
|
||||
ErrShardGroupNotFound = errors.New("shard group not found")
|
||||
|
||||
// ErrShardNotReplicated is returned if the node requested to be dropped has
|
||||
// the last copy of a shard present and the force keyword was not used
|
||||
ErrShardNotReplicated = newError("shard not replicated")
|
||||
ErrShardNotReplicated = errors.New("shard not replicated")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrContinuousQueryExists is returned when creating an already existing continuous query.
|
||||
ErrContinuousQueryExists = newError("continuous query already exists")
|
||||
ErrContinuousQueryExists = errors.New("continuous query already exists")
|
||||
|
||||
// ErrContinuousQueryNotFound is returned when removing a continuous query that doesn't exist.
|
||||
ErrContinuousQueryNotFound = newError("continuous query not found")
|
||||
ErrContinuousQueryNotFound = errors.New("continuous query not found")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrSubscriptionExists is returned when creating an already existing subscription.
|
||||
ErrSubscriptionExists = newError("subscription already exists")
|
||||
ErrSubscriptionExists = errors.New("subscription already exists")
|
||||
|
||||
// ErrSubscriptionNotFound is returned when removing a subscription that doesn't exist.
|
||||
ErrSubscriptionNotFound = newError("subscription not found")
|
||||
ErrSubscriptionNotFound = errors.New("subscription not found")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUserExists is returned when creating an already existing user.
|
||||
ErrUserExists = newError("user already exists")
|
||||
ErrUserExists = errors.New("user already exists")
|
||||
|
||||
// ErrUserNotFound is returned when mutating a user that doesn't exist.
|
||||
ErrUserNotFound = newError("user not found")
|
||||
ErrUserNotFound = errors.New("user not found")
|
||||
|
||||
// ErrUsernameRequired is returned when creating a user without a username.
|
||||
ErrUsernameRequired = newError("username required")
|
||||
ErrUsernameRequired = errors.New("username required")
|
||||
|
||||
// ErrAuthenticate is returned when authentication fails.
|
||||
ErrAuthenticate = errors.New("authentication failed")
|
||||
)
|
||||
|
||||
// errLookup stores a mapping of error strings to well defined error types.
|
||||
var errLookup = make(map[string]error)
|
||||
|
||||
func newError(msg string) error {
|
||||
err := errors.New(msg)
|
||||
errLookup[err.Error()] = err
|
||||
return err
|
||||
}
|
||||
|
||||
// lookupError returns a known error reference, if one exists.
|
||||
// Otherwise returns err.
|
||||
func lookupError(err error) error {
|
||||
if e, ok := errLookup[err.Error()]; ok {
|
||||
return e
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,501 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/influxdb/influxdb/services/meta/internal"
|
||||
"github.com/influxdb/influxdb/uuid"
|
||||
)
|
||||
|
||||
// handler represents an HTTP handler for the meta service.
|
||||
type handler struct {
|
||||
config *Config
|
||||
Version string
|
||||
|
||||
logger *log.Logger
|
||||
loggingEnabled bool // Log every HTTP access.
|
||||
pprofEnabled bool
|
||||
store interface {
|
||||
afterIndex(index uint64) <-chan struct{}
|
||||
index() uint64
|
||||
leader() string
|
||||
leaderHTTP() string
|
||||
snapshot() (*Data, error)
|
||||
apply(b []byte) error
|
||||
join(n *NodeInfo) error
|
||||
otherMetaServersHTTP() []string
|
||||
}
|
||||
s *Service
|
||||
|
||||
mu sync.RWMutex
|
||||
closing chan struct{}
|
||||
leases *Leases
|
||||
}
|
||||
|
||||
// newHandler returns a new instance of handler with routes.
|
||||
func newHandler(c *Config, s *Service) *handler {
|
||||
h := &handler{
|
||||
s: s,
|
||||
config: c,
|
||||
logger: log.New(os.Stderr, "[meta-http] ", log.LstdFlags),
|
||||
loggingEnabled: c.LoggingEnabled,
|
||||
closing: make(chan struct{}),
|
||||
leases: NewLeases(time.Duration(c.LeaseDuration)),
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// WrapHandler wraps an http.HandlerFunc in the handler's standard middleware chain.
|
||||
func (h *handler) WrapHandler(name string, hf http.HandlerFunc) http.Handler {
|
||||
var handler http.Handler
|
||||
handler = http.HandlerFunc(hf)
|
||||
handler = gzipFilter(handler)
|
||||
handler = versionHeader(handler, h)
|
||||
handler = requestID(handler)
|
||||
if h.loggingEnabled {
|
||||
handler = logging(handler, name, h.logger)
|
||||
}
|
||||
handler = recovery(handler, name, h.logger) // make sure recovery is always last
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// ServeHTTP responds to HTTP request to the handler.
|
||||
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
switch r.URL.Path {
|
||||
case "/ping":
|
||||
h.WrapHandler("ping", h.servePing).ServeHTTP(w, r)
|
||||
case "/lease":
|
||||
h.WrapHandler("lease", h.serveLease).ServeHTTP(w, r)
|
||||
default:
|
||||
h.WrapHandler("snapshot", h.serveSnapshot).ServeHTTP(w, r)
|
||||
}
|
||||
case "POST":
|
||||
h.WrapHandler("execute", h.serveExec).ServeHTTP(w, r)
|
||||
default:
|
||||
http.Error(w, "", http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) Close() error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
select {
|
||||
case <-h.closing:
|
||||
// do nothing here
|
||||
default:
|
||||
close(h.closing)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *handler) isClosed() bool {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
select {
|
||||
case <-h.closing:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// serveExec executes the requested command.
|
||||
func (h *handler) serveExec(w http.ResponseWriter, r *http.Request) {
|
||||
if h.isClosed() {
|
||||
h.httpError(fmt.Errorf("server closed"), w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Read the command from the request body.
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if r.URL.Path == "/join" {
|
||||
n := &NodeInfo{}
|
||||
if err := json.Unmarshal(body, n); err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err := h.store.join(n)
|
||||
if err == raft.ErrNotLeader {
|
||||
l := h.store.leaderHTTP()
|
||||
if l == "" {
|
||||
// No cluster leader. Client will have to try again later.
|
||||
h.httpError(errors.New("no leader"), w, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
scheme := "http://"
|
||||
if h.config.HTTPSEnabled {
|
||||
scheme = "https://"
|
||||
}
|
||||
|
||||
l = scheme + l + "/join"
|
||||
http.Redirect(w, r, l, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure it's a valid command.
|
||||
if err := validateCommand(body); err != nil {
|
||||
h.httpError(err, w, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Apply the command to the store.
|
||||
var resp *internal.Response
|
||||
if err := h.store.apply(body); err != nil {
|
||||
// If we aren't the leader, redirect client to the leader.
|
||||
if err == raft.ErrNotLeader {
|
||||
l := h.store.leaderHTTP()
|
||||
if l == "" {
|
||||
// No cluster leader. Client will have to try again later.
|
||||
h.httpError(errors.New("no leader"), w, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
scheme := "http://"
|
||||
if h.config.HTTPSEnabled {
|
||||
scheme = "https://"
|
||||
}
|
||||
|
||||
l = scheme + l + "/execute"
|
||||
http.Redirect(w, r, l, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// Error wasn't a leadership error so pass it back to client.
|
||||
resp = &internal.Response{
|
||||
OK: proto.Bool(false),
|
||||
Error: proto.String(err.Error()),
|
||||
}
|
||||
} else {
|
||||
// Apply was successful. Return the new store index to the client.
|
||||
resp = &internal.Response{
|
||||
OK:    proto.Bool(true),
|
||||
Index: proto.Uint64(h.store.index()),
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal the response.
|
||||
b, err := proto.Marshal(resp)
|
||||
if err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Send response to client.
|
||||
w.Header().Add("Content-Type", "application/octet-stream")
|
||||
w.Write(b)
|
||||
}
|
||||
|
||||
func validateCommand(b []byte) error {
|
||||
// Ensure command can be deserialized before applying.
|
||||
if err := proto.Unmarshal(b, &internal.Command{}); err != nil {
|
||||
return fmt.Errorf("unable to unmarshal command: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
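
// Illustration only (not part of this diff): a minimal sketch of how a caller inside the
// influxdb tree (the meta client, for instance) might build and submit a command to the
// /execute endpoint handled by serveExec. The function name, the choice of
// CreateMetaNodeCommand, and the extra "bytes" import are assumptions for the example;
// the proto calls follow the gogo/protobuf extension API already used in this package.
func submitCreateMetaNode(metaAddr, httpAddr, tcpAddr string, rnd uint64) error {
	// Build the typed command and attach the payload as a protobuf extension,
	// which is exactly what validateCommand unmarshals on the server side.
	cmd := &internal.Command{Type: internal.Command_CreateMetaNodeCommand.Enum()}
	err := proto.SetExtension(cmd, internal.E_CreateMetaNodeCommand_Command, &internal.CreateMetaNodeCommand{
		HTTPAddr: proto.String(httpAddr),
		TCPAddr:  proto.String(tcpAddr),
		Rand:     proto.Uint64(rnd),
	})
	if err != nil {
		return err
	}

	b, err := proto.Marshal(cmd)
	if err != nil {
		return err
	}

	// POST the serialized command; serveExec applies it to the store and redirects
	// to the raft leader when this node is not the leader.
	resp, err := http.Post("http://"+metaAddr+"/execute", "application/octet-stream", bytes.NewReader(b))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}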
|
||||
|
||||
// serveSnapshot is a long-polling HTTP endpoint that serves cache updates to clients.
|
||||
func (h *handler) serveSnapshot(w http.ResponseWriter, r *http.Request) {
|
||||
if h.isClosed() {
|
||||
h.httpError(fmt.Errorf("server closed"), w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// get the current index that client has
|
||||
index, err := strconv.ParseUint(r.URL.Query().Get("index"), 10, 64)
|
||||
if err != nil {
|
||||
http.Error(w, "error parsing index", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-h.store.afterIndex(index):
|
||||
// Send updated snapshot to client.
|
||||
ss, err := h.store.snapshot()
|
||||
if err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
b, err := ss.MarshalBinary()
|
||||
if err != nil {
|
||||
h.httpError(err, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Write(b)
|
||||
return
|
||||
case <-w.(http.CloseNotifier).CloseNotify():
|
||||
// Client closed the connection so we're done.
|
||||
return
|
||||
case <-h.closing:
|
||||
h.httpError(fmt.Errorf("server closed"), w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
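
// Illustration only (not part of this diff): a rough sketch of the long-poll loop a client
// might run against serveSnapshot. The "index" query parameter and the binary snapshot
// body match the handler above; the function name, Data.UnmarshalBinary, and the
// data.Index field are assumptions for the example (they mirror the MarshalBinary call
// used by the server).
func pollSnapshots(metaAddr string) error {
	var index uint64
	for {
		// The server blocks this request until its store index moves past the one we send.
		resp, err := http.Get(fmt.Sprintf("http://%s/?index=%d", metaAddr, index))
		if err != nil {
			return err
		}
		b, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return err
		}

		data := &Data{}
		if err := data.UnmarshalBinary(b); err != nil { // assumed counterpart of MarshalBinary
			return err
		}
		index = data.Index // assumed field holding the snapshot's index
	}
}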
|
||||
|
||||
// servePing reports that this server is up by returning the current leader; if "all" is
|
||||
// specified, it checks the status of the other meta servers as well.
|
||||
func (h *handler) servePing(w http.ResponseWriter, r *http.Request) {
|
||||
// if they're not asking to check all servers, just return who we think
|
||||
// the leader is
|
||||
if r.URL.Query().Get("all") == "" {
|
||||
w.Write([]byte(h.store.leader()))
|
||||
return
|
||||
}
|
||||
|
||||
leader := h.store.leader()
|
||||
healthy := true
|
||||
for _, n := range h.store.otherMetaServersHTTP() {
|
||||
scheme := "http://"
|
||||
if h.config.HTTPSEnabled {
|
||||
scheme = "https://"
|
||||
}
|
||||
url := scheme + n + "/ping"
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
|
||||
if leader != string(b) {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if healthy {
|
||||
w.Write([]byte(h.store.leader()))
|
||||
return
|
||||
}
|
||||
|
||||
h.httpError(fmt.Errorf("one or more metaservers not up"), w, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// serveLease handles requests to acquire or renew a named lease, redirecting to the raft leader when necessary.
|
||||
func (h *handler) serveLease(w http.ResponseWriter, r *http.Request) {
|
||||
// Redirect to leader if necessary.
|
||||
leader := h.store.leaderHTTP()
|
||||
if leader != h.s.httpAddr {
|
||||
if leader == "" {
|
||||
// No cluster leader. Client will have to try again later.
|
||||
h.httpError(errors.New("no leader"), w, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
scheme := "http://"
|
||||
if h.config.HTTPSEnabled {
|
||||
scheme = "https://"
|
||||
}
|
||||
|
||||
leader = scheme + leader + "/lease"
|
||||
http.Redirect(w, r, leader, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
q := r.URL.Query()
|
||||
|
||||
// Get the requested lease name.
|
||||
name := q.Get("name")
|
||||
if name == "" {
|
||||
http.Error(w, "lease name required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
// Get the ID of the requesting node.
|
||||
nodeIDStr := q.Get("nodeid")
|
||||
if name == "" {
|
||||
http.Error(w, "node ID required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
// Convert node ID to an int.
|
||||
nodeID, err := strconv.ParseUint(nodeIDStr, 10, 64)
|
||||
if err != nil {
|
||||
http.Error(w, "invalid node ID", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Try to acquire the requested lease.
|
||||
// Acquire always returns a lease; err determines whether we own it.
|
||||
l, err := h.leases.Acquire(name, nodeID)
|
||||
// Marshal the lease to JSON.
|
||||
b, e := json.Marshal(l)
|
||||
if e != nil {
|
||||
h.httpError(e, w, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Set the content type before the status is written so the header is actually sent.
w.Header().Set("Content-Type", "application/json")
// Write HTTP status.
|
||||
if err != nil {
|
||||
// Another node owns the lease.
|
||||
w.WriteHeader(http.StatusConflict)
|
||||
} else {
|
||||
// Lease successfully acquired.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
// Write the lease data.
|
||||
w.Header().Add("content-type", "application/json")
|
||||
w.Write(b)
|
||||
return
|
||||
}
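
// Illustration only (not part of this diff): the request/response cycle serveLease
// implements, from the caller's side. The lease name and node ID travel as query
// parameters, the body is the JSON-encoded Lease defined below, and 409 Conflict means
// another node currently owns the lease. The function name is made up for the example.
func acquireLeaseHTTP(metaAddr, name string, nodeID uint64) (*Lease, bool, error) {
	url := fmt.Sprintf("http://%s/lease?name=%s&nodeid=%d", metaAddr, name, nodeID)
	resp, err := http.Get(url)
	if err != nil {
		return nil, false, err
	}
	defer resp.Body.Close()

	l := &Lease{}
	if err := json.NewDecoder(resp.Body).Decode(l); err != nil {
		return nil, false, err
	}
	// StatusOK means we own the lease until l.Expiration; StatusConflict means
	// l.Owner identifies the node that holds it.
	return l, resp.StatusCode == http.StatusOK, nil
}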
|
||||
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
}
|
||||
|
||||
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
func (w gzipResponseWriter) Flush() {
|
||||
w.Writer.(*gzip.Writer).Flush()
|
||||
}
|
||||
|
||||
func (w gzipResponseWriter) CloseNotify() <-chan bool {
|
||||
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// determines if the client can accept compressed responses, and encodes accordingly
|
||||
func gzipFilter(inner http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
inner.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
inner.ServeHTTP(gzw, r)
|
||||
})
|
||||
}
|
||||
|
||||
// versionHeader takes an HTTP handler and returns an HTTP handler
|
||||
// and adds the X-InfluxDB-Version header to outgoing responses.
|
||||
func versionHeader(inner http.Handler, h *handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Add("X-InfluxDB-Version", h.Version)
|
||||
inner.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func requestID(inner http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
uid := uuid.TimeUUID()
|
||||
r.Header.Set("Request-Id", uid.String())
|
||||
w.Header().Set("Request-Id", r.Header.Get("Request-Id"))
|
||||
|
||||
inner.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func logging(inner http.Handler, name string, weblog *log.Logger) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
l := &responseLogger{w: w}
|
||||
inner.ServeHTTP(l, r)
|
||||
logLine := buildLogLine(l, r, start)
|
||||
weblog.Println(logLine)
|
||||
})
|
||||
}
|
||||
|
||||
func recovery(inner http.Handler, name string, weblog *log.Logger) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
l := &responseLogger{w: w}
|
||||
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
logLine := buildLogLine(l, r, start)
|
||||
logLine = fmt.Sprintf(`%s [panic:%s]`, logLine, err)
|
||||
weblog.Println(logLine)
|
||||
}
|
||||
}()
|
||||
|
||||
inner.ServeHTTP(l, r)
|
||||
})
|
||||
}
|
||||
|
||||
func (h *handler) httpError(err error, w http.ResponseWriter, status int) {
|
||||
if h.loggingEnabled {
|
||||
h.logger.Println(err)
|
||||
}
|
||||
http.Error(w, "", status)
|
||||
}
|
||||
|
||||
type Lease struct {
|
||||
Name string `json:"name"`
|
||||
Expiration time.Time `json:"expiration"`
|
||||
Owner uint64 `json:"owner"`
|
||||
}
|
||||
|
||||
type Leases struct {
|
||||
mu sync.Mutex
|
||||
m map[string]*Lease
|
||||
d time.Duration
|
||||
}
|
||||
|
||||
func NewLeases(d time.Duration) *Leases {
|
||||
return &Leases{
|
||||
m: make(map[string]*Lease),
|
||||
d: d,
|
||||
}
|
||||
}
|
||||
|
||||
func (leases *Leases) Acquire(name string, nodeID uint64) (*Lease, error) {
|
||||
leases.mu.Lock()
|
||||
defer leases.mu.Unlock()
|
||||
|
||||
l, ok := leases.m[name]
|
||||
if ok {
|
||||
if time.Now().After(l.Expiration) || l.Owner == nodeID {
|
||||
l.Expiration = time.Now().Add(leases.d)
|
||||
l.Owner = nodeID
|
||||
return l, nil
|
||||
}
|
||||
return l, errors.New("another node has the lease")
|
||||
}
|
||||
|
||||
l = &Lease{
|
||||
Name: name,
|
||||
Expiration: time.Now().Add(leases.d),
|
||||
Owner: nodeID,
|
||||
}
|
||||
|
||||
leases.m[name] = l
|
||||
|
||||
return l, nil
|
||||
}
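
// Illustration only (not part of this diff): how a caller such as serveLease above might
// use this type directly. The lease name and node ID are made up for the example;
// Acquire hands back the lease either way, and the error is the signal that another
// node currently owns it.
func exampleAcquireLease() {
	leases := NewLeases(time.Minute)

	l, err := leases.Acquire("example-lease", 42)
	if err != nil {
		// Another node holds the lease; l.Owner and l.Expiration say who and until when.
		return
	}
	// We own the lease until l.Expiration; calling Acquire again with the same node ID
	// before it expires renews it.
	_ = l
}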
|
|
@ -43,15 +43,13 @@ It has these top-level messages:
|
|||
CreateSubscriptionCommand
|
||||
DropSubscriptionCommand
|
||||
RemovePeerCommand
|
||||
CreateMetaNodeCommand
|
||||
CreateDataNodeCommand
|
||||
UpdateDataNodeCommand
|
||||
DeleteMetaNodeCommand
|
||||
DeleteDataNodeCommand
|
||||
Response
|
||||
ResponseHeader
|
||||
ErrorResponse
|
||||
FetchDataRequest
|
||||
FetchDataResponse
|
||||
JoinRequest
|
||||
JoinResponse
|
||||
PromoteRaftRequest
|
||||
PromoteRaftResponse
|
||||
SetMetaNodeCommand
|
||||
*/
|
||||
package internal
|
||||
|
||||
|
@ -64,45 +62,6 @@ var _ = proto.Marshal
|
|||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type RPCType int32
|
||||
|
||||
const (
|
||||
RPCType_Error RPCType = 1
|
||||
RPCType_FetchData RPCType = 2
|
||||
RPCType_Join RPCType = 3
|
||||
RPCType_PromoteRaft RPCType = 4
|
||||
)
|
||||
|
||||
var RPCType_name = map[int32]string{
|
||||
1: "Error",
|
||||
2: "FetchData",
|
||||
3: "Join",
|
||||
4: "PromoteRaft",
|
||||
}
|
||||
var RPCType_value = map[string]int32{
|
||||
"Error": 1,
|
||||
"FetchData": 2,
|
||||
"Join": 3,
|
||||
"PromoteRaft": 4,
|
||||
}
|
||||
|
||||
func (x RPCType) Enum() *RPCType {
|
||||
p := new(RPCType)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x RPCType) String() string {
|
||||
return proto.EnumName(RPCType_name, int32(x))
|
||||
}
|
||||
func (x *RPCType) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(RPCType_value, data, "RPCType")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = RPCType(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Command_Type int32
|
||||
|
||||
const (
|
||||
|
@ -128,6 +87,12 @@ const (
|
|||
Command_CreateSubscriptionCommand Command_Type = 21
|
||||
Command_DropSubscriptionCommand Command_Type = 22
|
||||
Command_RemovePeerCommand Command_Type = 23
|
||||
Command_CreateMetaNodeCommand Command_Type = 24
|
||||
Command_CreateDataNodeCommand Command_Type = 25
|
||||
Command_UpdateDataNodeCommand Command_Type = 26
|
||||
Command_DeleteMetaNodeCommand Command_Type = 27
|
||||
Command_DeleteDataNodeCommand Command_Type = 28
|
||||
Command_SetMetaNodeCommand Command_Type = 29
|
||||
)
|
||||
|
||||
var Command_Type_name = map[int32]string{
|
||||
|
@ -153,6 +118,12 @@ var Command_Type_name = map[int32]string{
|
|||
21: "CreateSubscriptionCommand",
|
||||
22: "DropSubscriptionCommand",
|
||||
23: "RemovePeerCommand",
|
||||
24: "CreateMetaNodeCommand",
|
||||
25: "CreateDataNodeCommand",
|
||||
26: "UpdateDataNodeCommand",
|
||||
27: "DeleteMetaNodeCommand",
|
||||
28: "DeleteDataNodeCommand",
|
||||
29: "SetMetaNodeCommand",
|
||||
}
|
||||
var Command_Type_value = map[string]int32{
|
||||
"CreateNodeCommand": 1,
|
||||
|
@ -177,6 +148,12 @@ var Command_Type_value = map[string]int32{
|
|||
"CreateSubscriptionCommand": 21,
|
||||
"DropSubscriptionCommand": 22,
|
||||
"RemovePeerCommand": 23,
|
||||
"CreateMetaNodeCommand": 24,
|
||||
"CreateDataNodeCommand": 25,
|
||||
"UpdateDataNodeCommand": 26,
|
||||
"DeleteMetaNodeCommand": 27,
|
||||
"DeleteDataNodeCommand": 28,
|
||||
"SetMetaNodeCommand": 29,
|
||||
}
|
||||
|
||||
func (x Command_Type) Enum() *Command_Type {
|
||||
|
@ -206,6 +183,8 @@ type Data struct {
|
|||
MaxNodeID *uint64 `protobuf:"varint,7,req,name=MaxNodeID" json:"MaxNodeID,omitempty"`
|
||||
MaxShardGroupID *uint64 `protobuf:"varint,8,req,name=MaxShardGroupID" json:"MaxShardGroupID,omitempty"`
|
||||
MaxShardID *uint64 `protobuf:"varint,9,req,name=MaxShardID" json:"MaxShardID,omitempty"`
|
||||
DataNodes []*NodeInfo `protobuf:"bytes,10,rep,name=DataNodes" json:"DataNodes,omitempty"`
|
||||
MetaNodes []*NodeInfo `protobuf:"bytes,11,rep,name=MetaNodes" json:"MetaNodes,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
|
@ -276,9 +255,24 @@ func (m *Data) GetMaxShardID() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Data) GetDataNodes() []*NodeInfo {
|
||||
if m != nil {
|
||||
return m.DataNodes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Data) GetMetaNodes() []*NodeInfo {
|
||||
if m != nil {
|
||||
return m.MetaNodes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type NodeInfo struct {
|
||||
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||
Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"`
|
||||
TCPHost *string `protobuf:"bytes,3,opt,name=TCPHost" json:"TCPHost,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
|
@ -300,6 +294,13 @@ func (m *NodeInfo) GetHost() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (m *NodeInfo) GetTCPHost() string {
|
||||
if m != nil && m.TCPHost != nil {
|
||||
return *m.TCPHost
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DatabaseInfo struct {
|
||||
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||
DefaultRetentionPolicy *string `protobuf:"bytes,2,req,name=DefaultRetentionPolicy" json:"DefaultRetentionPolicy,omitempty"`
|
||||
|
@ -1380,7 +1381,7 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
|
|||
}
|
||||
|
||||
type RemovePeerCommand struct {
|
||||
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||
ID *uint64 `protobuf:"varint,1,opt,name=ID" json:"ID,omitempty"`
|
||||
Addr *string `protobuf:"bytes,2,req,name=Addr" json:"Addr,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
@ -1411,6 +1412,166 @@ var E_RemovePeerCommand_Command = &proto.ExtensionDesc{
|
|||
Tag: "bytes,123,opt,name=command",
|
||||
}
|
||||
|
||||
type CreateMetaNodeCommand struct {
|
||||
HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"`
|
||||
TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"`
|
||||
Rand *uint64 `protobuf:"varint,3,req,name=Rand" json:"Rand,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CreateMetaNodeCommand) Reset() { *m = CreateMetaNodeCommand{} }
|
||||
func (m *CreateMetaNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateMetaNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *CreateMetaNodeCommand) GetHTTPAddr() string {
|
||||
if m != nil && m.HTTPAddr != nil {
|
||||
return *m.HTTPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CreateMetaNodeCommand) GetTCPAddr() string {
|
||||
if m != nil && m.TCPAddr != nil {
|
||||
return *m.TCPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CreateMetaNodeCommand) GetRand() uint64 {
|
||||
if m != nil && m.Rand != nil {
|
||||
return *m.Rand
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var E_CreateMetaNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*CreateMetaNodeCommand)(nil),
|
||||
Field: 124,
|
||||
Name: "internal.CreateMetaNodeCommand.command",
|
||||
Tag: "bytes,124,opt,name=command",
|
||||
}
|
||||
|
||||
type CreateDataNodeCommand struct {
|
||||
HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"`
|
||||
TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CreateDataNodeCommand) Reset() { *m = CreateDataNodeCommand{} }
|
||||
func (m *CreateDataNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateDataNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *CreateDataNodeCommand) GetHTTPAddr() string {
|
||||
if m != nil && m.HTTPAddr != nil {
|
||||
return *m.HTTPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CreateDataNodeCommand) GetTCPAddr() string {
|
||||
if m != nil && m.TCPAddr != nil {
|
||||
return *m.TCPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var E_CreateDataNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*CreateDataNodeCommand)(nil),
|
||||
Field: 125,
|
||||
Name: "internal.CreateDataNodeCommand.command",
|
||||
Tag: "bytes,125,opt,name=command",
|
||||
}
|
||||
|
||||
type UpdateDataNodeCommand struct {
|
||||
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||
Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"`
|
||||
TCPHost *string `protobuf:"bytes,3,req,name=TCPHost" json:"TCPHost,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UpdateDataNodeCommand) Reset() { *m = UpdateDataNodeCommand{} }
|
||||
func (m *UpdateDataNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*UpdateDataNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *UpdateDataNodeCommand) GetID() uint64 {
|
||||
if m != nil && m.ID != nil {
|
||||
return *m.ID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *UpdateDataNodeCommand) GetHost() string {
|
||||
if m != nil && m.Host != nil {
|
||||
return *m.Host
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UpdateDataNodeCommand) GetTCPHost() string {
|
||||
if m != nil && m.TCPHost != nil {
|
||||
return *m.TCPHost
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var E_UpdateDataNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*UpdateDataNodeCommand)(nil),
|
||||
Field: 126,
|
||||
Name: "internal.UpdateDataNodeCommand.command",
|
||||
Tag: "bytes,126,opt,name=command",
|
||||
}
|
||||
|
||||
type DeleteMetaNodeCommand struct {
|
||||
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeleteMetaNodeCommand) Reset() { *m = DeleteMetaNodeCommand{} }
|
||||
func (m *DeleteMetaNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteMetaNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *DeleteMetaNodeCommand) GetID() uint64 {
|
||||
if m != nil && m.ID != nil {
|
||||
return *m.ID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var E_DeleteMetaNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*DeleteMetaNodeCommand)(nil),
|
||||
Field: 127,
|
||||
Name: "internal.DeleteMetaNodeCommand.command",
|
||||
Tag: "bytes,127,opt,name=command",
|
||||
}
|
||||
|
||||
type DeleteDataNodeCommand struct {
|
||||
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeleteDataNodeCommand) Reset() { *m = DeleteDataNodeCommand{} }
|
||||
func (m *DeleteDataNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteDataNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *DeleteDataNodeCommand) GetID() uint64 {
|
||||
if m != nil && m.ID != nil {
|
||||
return *m.ID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var E_DeleteDataNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*DeleteDataNodeCommand)(nil),
|
||||
Field: 128,
|
||||
Name: "internal.DeleteDataNodeCommand.command",
|
||||
Tag: "bytes,128,opt,name=command",
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"`
|
||||
Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"`
|
||||
|
@ -1443,226 +1604,88 @@ func (m *Response) GetIndex() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
type ResponseHeader struct {
|
||||
OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"`
|
||||
Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"`
|
||||
type SetMetaNodeCommand struct {
|
||||
HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"`
|
||||
TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"`
|
||||
Rand *uint64 `protobuf:"varint,3,req,name=Rand" json:"Rand,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
|
||||
func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseHeader) ProtoMessage() {}
|
||||
func (m *SetMetaNodeCommand) Reset() { *m = SetMetaNodeCommand{} }
|
||||
func (m *SetMetaNodeCommand) String() string { return proto.CompactTextString(m) }
|
||||
func (*SetMetaNodeCommand) ProtoMessage() {}
|
||||
|
||||
func (m *ResponseHeader) GetOK() bool {
|
||||
if m != nil && m.OK != nil {
|
||||
return *m.OK
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *ResponseHeader) GetError() string {
|
||||
if m != nil && m.Error != nil {
|
||||
return *m.Error
|
||||
func (m *SetMetaNodeCommand) GetHTTPAddr() string {
|
||||
if m != nil && m.HTTPAddr != nil {
|
||||
return *m.HTTPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ErrorResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ErrorResponse) Reset() { *m = ErrorResponse{} }
|
||||
func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ErrorResponse) ProtoMessage() {}
|
||||
|
||||
func (m *ErrorResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FetchDataRequest struct {
|
||||
Index *uint64 `protobuf:"varint,1,req,name=Index" json:"Index,omitempty"`
|
||||
Term *uint64 `protobuf:"varint,2,req,name=Term" json:"Term,omitempty"`
|
||||
Blocking *bool `protobuf:"varint,3,opt,name=Blocking,def=0" json:"Blocking,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} }
|
||||
func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*FetchDataRequest) ProtoMessage() {}
|
||||
|
||||
const Default_FetchDataRequest_Blocking bool = false
|
||||
|
||||
func (m *FetchDataRequest) GetIndex() uint64 {
|
||||
if m != nil && m.Index != nil {
|
||||
return *m.Index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *FetchDataRequest) GetTerm() uint64 {
|
||||
if m != nil && m.Term != nil {
|
||||
return *m.Term
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *FetchDataRequest) GetBlocking() bool {
|
||||
if m != nil && m.Blocking != nil {
|
||||
return *m.Blocking
|
||||
}
|
||||
return Default_FetchDataRequest_Blocking
|
||||
}
|
||||
|
||||
type FetchDataResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||
Index *uint64 `protobuf:"varint,2,req,name=Index" json:"Index,omitempty"`
|
||||
Term *uint64 `protobuf:"varint,3,req,name=Term" json:"Term,omitempty"`
|
||||
Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} }
|
||||
func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*FetchDataResponse) ProtoMessage() {}
|
||||
|
||||
func (m *FetchDataResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FetchDataResponse) GetIndex() uint64 {
|
||||
if m != nil && m.Index != nil {
|
||||
return *m.Index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *FetchDataResponse) GetTerm() uint64 {
|
||||
if m != nil && m.Term != nil {
|
||||
return *m.Term
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *FetchDataResponse) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type JoinRequest struct {
|
||||
Addr *string `protobuf:"bytes,1,req,name=Addr" json:"Addr,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *JoinRequest) Reset() { *m = JoinRequest{} }
|
||||
func (m *JoinRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*JoinRequest) ProtoMessage() {}
|
||||
|
||||
func (m *JoinRequest) GetAddr() string {
|
||||
if m != nil && m.Addr != nil {
|
||||
return *m.Addr
|
||||
func (m *SetMetaNodeCommand) GetTCPAddr() string {
|
||||
if m != nil && m.TCPAddr != nil {
|
||||
return *m.TCPAddr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type JoinResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||
EnableRaft *bool `protobuf:"varint,2,opt,name=EnableRaft" json:"EnableRaft,omitempty"`
|
||||
RaftNodes []string `protobuf:"bytes,3,rep,name=RaftNodes" json:"RaftNodes,omitempty"`
|
||||
NodeID *uint64 `protobuf:"varint,4,opt,name=NodeID" json:"NodeID,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *JoinResponse) Reset() { *m = JoinResponse{} }
|
||||
func (m *JoinResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*JoinResponse) ProtoMessage() {}
|
||||
|
||||
func (m *JoinResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *JoinResponse) GetEnableRaft() bool {
|
||||
if m != nil && m.EnableRaft != nil {
|
||||
return *m.EnableRaft
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *JoinResponse) GetRaftNodes() []string {
|
||||
if m != nil {
|
||||
return m.RaftNodes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *JoinResponse) GetNodeID() uint64 {
|
||||
if m != nil && m.NodeID != nil {
|
||||
return *m.NodeID
|
||||
func (m *SetMetaNodeCommand) GetRand() uint64 {
|
||||
if m != nil && m.Rand != nil {
|
||||
return *m.Rand
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type PromoteRaftRequest struct {
|
||||
Addr *string `protobuf:"bytes,1,req,name=Addr" json:"Addr,omitempty"`
|
||||
RaftNodes []string `protobuf:"bytes,2,rep,name=RaftNodes" json:"RaftNodes,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PromoteRaftRequest) Reset() { *m = PromoteRaftRequest{} }
|
||||
func (m *PromoteRaftRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*PromoteRaftRequest) ProtoMessage() {}
|
||||
|
||||
func (m *PromoteRaftRequest) GetAddr() string {
|
||||
if m != nil && m.Addr != nil {
|
||||
return *m.Addr
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PromoteRaftRequest) GetRaftNodes() []string {
|
||||
if m != nil {
|
||||
return m.RaftNodes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PromoteRaftResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||
Success *bool `protobuf:"varint,2,opt,name=Success" json:"Success,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PromoteRaftResponse) Reset() { *m = PromoteRaftResponse{} }
|
||||
func (m *PromoteRaftResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*PromoteRaftResponse) ProtoMessage() {}
|
||||
|
||||
func (m *PromoteRaftResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PromoteRaftResponse) GetSuccess() bool {
|
||||
if m != nil && m.Success != nil {
|
||||
return *m.Success
|
||||
}
|
||||
return false
|
||||
var E_SetMetaNodeCommand_Command = &proto.ExtensionDesc{
|
||||
ExtendedType: (*Command)(nil),
|
||||
ExtensionType: (*SetMetaNodeCommand)(nil),
|
||||
Field: 129,
|
||||
Name: "internal.SetMetaNodeCommand.command",
|
||||
Tag: "bytes,129,opt,name=command",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("internal.RPCType", RPCType_name, RPCType_value)
|
||||
proto.RegisterType((*Data)(nil), "internal.Data")
|
||||
proto.RegisterType((*NodeInfo)(nil), "internal.NodeInfo")
|
||||
proto.RegisterType((*DatabaseInfo)(nil), "internal.DatabaseInfo")
|
||||
proto.RegisterType((*RetentionPolicyInfo)(nil), "internal.RetentionPolicyInfo")
|
||||
proto.RegisterType((*ShardGroupInfo)(nil), "internal.ShardGroupInfo")
|
||||
proto.RegisterType((*ShardInfo)(nil), "internal.ShardInfo")
|
||||
proto.RegisterType((*SubscriptionInfo)(nil), "internal.SubscriptionInfo")
|
||||
proto.RegisterType((*ShardOwner)(nil), "internal.ShardOwner")
|
||||
proto.RegisterType((*ContinuousQueryInfo)(nil), "internal.ContinuousQueryInfo")
|
||||
proto.RegisterType((*UserInfo)(nil), "internal.UserInfo")
|
||||
proto.RegisterType((*UserPrivilege)(nil), "internal.UserPrivilege")
|
||||
proto.RegisterType((*Command)(nil), "internal.Command")
|
||||
proto.RegisterType((*CreateNodeCommand)(nil), "internal.CreateNodeCommand")
|
||||
proto.RegisterType((*DeleteNodeCommand)(nil), "internal.DeleteNodeCommand")
|
||||
proto.RegisterType((*CreateDatabaseCommand)(nil), "internal.CreateDatabaseCommand")
|
||||
proto.RegisterType((*DropDatabaseCommand)(nil), "internal.DropDatabaseCommand")
|
||||
proto.RegisterType((*CreateRetentionPolicyCommand)(nil), "internal.CreateRetentionPolicyCommand")
|
||||
proto.RegisterType((*DropRetentionPolicyCommand)(nil), "internal.DropRetentionPolicyCommand")
|
||||
proto.RegisterType((*SetDefaultRetentionPolicyCommand)(nil), "internal.SetDefaultRetentionPolicyCommand")
|
||||
proto.RegisterType((*UpdateRetentionPolicyCommand)(nil), "internal.UpdateRetentionPolicyCommand")
|
||||
proto.RegisterType((*CreateShardGroupCommand)(nil), "internal.CreateShardGroupCommand")
|
||||
proto.RegisterType((*DeleteShardGroupCommand)(nil), "internal.DeleteShardGroupCommand")
|
||||
proto.RegisterType((*CreateContinuousQueryCommand)(nil), "internal.CreateContinuousQueryCommand")
|
||||
proto.RegisterType((*DropContinuousQueryCommand)(nil), "internal.DropContinuousQueryCommand")
|
||||
proto.RegisterType((*CreateUserCommand)(nil), "internal.CreateUserCommand")
|
||||
proto.RegisterType((*DropUserCommand)(nil), "internal.DropUserCommand")
|
||||
proto.RegisterType((*UpdateUserCommand)(nil), "internal.UpdateUserCommand")
|
||||
proto.RegisterType((*SetPrivilegeCommand)(nil), "internal.SetPrivilegeCommand")
|
||||
proto.RegisterType((*SetDataCommand)(nil), "internal.SetDataCommand")
|
||||
proto.RegisterType((*SetAdminPrivilegeCommand)(nil), "internal.SetAdminPrivilegeCommand")
|
||||
proto.RegisterType((*UpdateNodeCommand)(nil), "internal.UpdateNodeCommand")
|
||||
proto.RegisterType((*CreateSubscriptionCommand)(nil), "internal.CreateSubscriptionCommand")
|
||||
proto.RegisterType((*DropSubscriptionCommand)(nil), "internal.DropSubscriptionCommand")
|
||||
proto.RegisterType((*RemovePeerCommand)(nil), "internal.RemovePeerCommand")
|
||||
proto.RegisterType((*CreateMetaNodeCommand)(nil), "internal.CreateMetaNodeCommand")
|
||||
proto.RegisterType((*CreateDataNodeCommand)(nil), "internal.CreateDataNodeCommand")
|
||||
proto.RegisterType((*UpdateDataNodeCommand)(nil), "internal.UpdateDataNodeCommand")
|
||||
proto.RegisterType((*DeleteMetaNodeCommand)(nil), "internal.DeleteMetaNodeCommand")
|
||||
proto.RegisterType((*DeleteDataNodeCommand)(nil), "internal.DeleteDataNodeCommand")
|
||||
proto.RegisterType((*Response)(nil), "internal.Response")
|
||||
proto.RegisterType((*SetMetaNodeCommand)(nil), "internal.SetMetaNodeCommand")
|
||||
proto.RegisterEnum("internal.Command_Type", Command_Type_name, Command_Type_value)
|
||||
proto.RegisterExtension(E_CreateNodeCommand_Command)
|
||||
proto.RegisterExtension(E_DeleteNodeCommand_Command)
|
||||
|
@ -1686,4 +1709,10 @@ func init() {
|
|||
proto.RegisterExtension(E_CreateSubscriptionCommand_Command)
|
||||
proto.RegisterExtension(E_DropSubscriptionCommand_Command)
|
||||
proto.RegisterExtension(E_RemovePeerCommand_Command)
|
||||
proto.RegisterExtension(E_CreateMetaNodeCommand_Command)
|
||||
proto.RegisterExtension(E_CreateDataNodeCommand_Command)
|
||||
proto.RegisterExtension(E_UpdateDataNodeCommand_Command)
|
||||
proto.RegisterExtension(E_DeleteMetaNodeCommand_Command)
|
||||
proto.RegisterExtension(E_DeleteDataNodeCommand_Command)
|
||||
proto.RegisterExtension(E_SetMetaNodeCommand_Command)
|
||||
}
|
|
@ -18,11 +18,16 @@ message Data {
|
|||
required uint64 MaxNodeID = 7;
|
||||
required uint64 MaxShardGroupID = 8;
|
||||
required uint64 MaxShardID = 9;
|
||||
|
||||
// added for 0.10.0
|
||||
repeated NodeInfo DataNodes = 10;
|
||||
repeated NodeInfo MetaNodes = 11;
|
||||
}
|
||||
|
||||
message NodeInfo {
|
||||
required uint64 ID = 1;
|
||||
required string Host = 2;
|
||||
optional string TCPHost = 3;
|
||||
}
|
||||
|
||||
message DatabaseInfo {
|
||||
|
@ -115,11 +120,19 @@ message Command {
|
|||
CreateSubscriptionCommand = 21;
|
||||
DropSubscriptionCommand = 22;
|
||||
RemovePeerCommand = 23;
|
||||
CreateMetaNodeCommand = 24;
|
||||
CreateDataNodeCommand = 25;
|
||||
UpdateDataNodeCommand = 26;
|
||||
DeleteMetaNodeCommand = 27;
|
||||
DeleteDataNodeCommand = 28;
|
||||
SetMetaNodeCommand = 29;
|
||||
}
|
||||
|
||||
required Type type = 1;
|
||||
}
|
||||
|
||||
// This isn't used in >= 0.10.0. Kept around for upgrade purposes. Instead
|
||||
// look at CreateDataNodeCommand and CreateMetaNodeCommand
|
||||
message CreateNodeCommand {
|
||||
extend Command {
|
||||
optional CreateNodeCommand command = 101;
|
||||
|
@ -301,77 +314,63 @@ message RemovePeerCommand {
|
|||
extend Command {
|
||||
optional RemovePeerCommand command = 123;
|
||||
}
|
||||
required uint64 ID = 1;
|
||||
optional uint64 ID = 1;
|
||||
required string Addr = 2;
|
||||
}
|
||||
|
||||
message CreateMetaNodeCommand {
|
||||
extend Command {
|
||||
optional CreateMetaNodeCommand command = 124;
|
||||
}
|
||||
required string HTTPAddr = 1;
|
||||
required string TCPAddr = 2;
|
||||
required uint64 Rand = 3;
|
||||
}
|
||||
|
||||
message CreateDataNodeCommand {
|
||||
extend Command {
|
||||
optional CreateDataNodeCommand command = 125;
|
||||
}
|
||||
required string HTTPAddr = 1;
|
||||
required string TCPAddr = 2;
|
||||
}
|
||||
|
||||
message UpdateDataNodeCommand {
|
||||
extend Command {
|
||||
optional UpdateDataNodeCommand command = 126;
|
||||
}
|
||||
required uint64 ID = 1;
|
||||
required string Host = 2;
|
||||
required string TCPHost = 3;
|
||||
}
|
||||
|
||||
message DeleteMetaNodeCommand {
|
||||
extend Command {
|
||||
optional DeleteMetaNodeCommand command = 127;
|
||||
}
|
||||
required uint64 ID = 1;
|
||||
}
|
||||
|
||||
message DeleteDataNodeCommand {
|
||||
extend Command {
|
||||
optional DeleteDataNodeCommand command = 128;
|
||||
}
|
||||
required uint64 ID = 1;
|
||||
}
|
||||
|
||||
message Response {
|
||||
required bool OK = 1;
|
||||
optional string Error = 2;
|
||||
optional uint64 Index = 3;
|
||||
}
|
||||
|
||||
|
||||
//========================================================================
|
||||
//
|
||||
// RPC - higher-level cluster communication operations
|
||||
//
|
||||
//========================================================================
|
||||
|
||||
enum RPCType {
|
||||
Error = 1;
|
||||
FetchData = 2;
|
||||
Join = 3;
|
||||
PromoteRaft = 4;
|
||||
}
|
||||
|
||||
message ResponseHeader {
|
||||
required bool OK = 1;
|
||||
optional string Error = 2;
|
||||
}
|
||||
|
||||
message ErrorResponse {
|
||||
required ResponseHeader Header = 1;
|
||||
}
|
||||
|
||||
message FetchDataRequest {
|
||||
required uint64 Index = 1;
|
||||
required uint64 Term = 2;
|
||||
optional bool Blocking = 3 [default = false];
|
||||
}
|
||||
|
||||
message FetchDataResponse {
|
||||
required ResponseHeader Header = 1;
|
||||
required uint64 Index = 2;
|
||||
required uint64 Term = 3;
|
||||
optional bytes Data = 4;
|
||||
}
|
||||
|
||||
message JoinRequest {
|
||||
required string Addr = 1;
|
||||
}
|
||||
|
||||
message JoinResponse {
|
||||
required ResponseHeader Header = 1;
|
||||
|
||||
// Indicates that this node should take part in the raft cluster.
|
||||
optional bool EnableRaft = 2;
|
||||
|
||||
// The addresses of raft peers to use if joining as a raft member. If not joining
|
||||
// as a raft member, these are the nodes running raft.
|
||||
repeated string RaftNodes = 3;
|
||||
|
||||
// The node ID assigned to the requesting node.
|
||||
optional uint64 NodeID = 4;
|
||||
}
|
||||
|
||||
message PromoteRaftRequest {
|
||||
required string Addr = 1;
|
||||
repeated string RaftNodes = 2;
|
||||
}
|
||||
|
||||
message PromoteRaftResponse {
|
||||
required ResponseHeader Header = 1;
|
||||
|
||||
optional bool Success = 2;
|
||||
// SetMetaNodeCommand sets the initial meta node in a cluster; it also updates
|
||||
// the meta node if a single-host cluster restarts and its hostname changes.
|
||||
message SetMetaNodeCommand {
|
||||
extend Command {
|
||||
optional SetMetaNodeCommand command = 129;
|
||||
}
|
||||
required string HTTPAddr = 1;
|
||||
required string TCPAddr = 2;
|
||||
required uint64 Rand = 3;
|
||||
}
|
|
@ -0,0 +1,328 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/hashicorp/raft-boltdb"
|
||||
)
|
||||
|
||||
// Raft configuration.
|
||||
const (
|
||||
raftLogCacheSize = 512
|
||||
raftSnapshotsRetained = 2
|
||||
raftTransportMaxPool = 3
|
||||
raftTransportTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// raftState is a consensus strategy that uses a local raft implementation for
|
||||
// consensus operations.
|
||||
type raftState struct {
|
||||
wg sync.WaitGroup
|
||||
config *Config
|
||||
closing chan struct{}
|
||||
raft *raft.Raft
|
||||
transport *raft.NetworkTransport
|
||||
peerStore raft.PeerStore
|
||||
raftStore *raftboltdb.BoltStore
|
||||
raftLayer *raftLayer
|
||||
ln net.Listener
|
||||
addr string
|
||||
logger *log.Logger
|
||||
path string
|
||||
}
|
||||
|
||||
func newRaftState(c *Config, addr string) *raftState {
|
||||
return &raftState{
|
||||
config: c,
|
||||
addr: addr,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *raftState) open(s *store, ln net.Listener, initializePeers []string) error {
|
||||
r.ln = ln
|
||||
r.closing = make(chan struct{})
|
||||
|
||||
// Setup raft configuration.
|
||||
config := raft.DefaultConfig()
|
||||
config.LogOutput = ioutil.Discard
|
||||
|
||||
if r.config.ClusterTracing {
|
||||
config.Logger = r.logger
|
||||
}
|
||||
config.HeartbeatTimeout = time.Duration(r.config.HeartbeatTimeout)
|
||||
config.ElectionTimeout = time.Duration(r.config.ElectionTimeout)
|
||||
config.LeaderLeaseTimeout = time.Duration(r.config.LeaderLeaseTimeout)
|
||||
config.CommitTimeout = time.Duration(r.config.CommitTimeout)
|
||||
// Since we actually never call `removePeer` this is safe.
|
||||
// If we decide to call removePeer in the future, we will have to re-evaluate how to handle this.
|
||||
config.ShutdownOnRemove = false
|
||||
|
||||
// Build raft layer to multiplex listener.
|
||||
r.raftLayer = newRaftLayer(r.addr, r.ln)
|
||||
|
||||
// Create a transport layer
|
||||
r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)
|
||||
|
||||
// Create peer storage.
|
||||
r.peerStore = raft.NewJSONPeers(r.path, r.transport)
|
||||
|
||||
// If initializePeers is non-empty, this server is joining the raft cluster for the first time.
|
||||
if len(initializePeers) > 0 {
|
||||
if err := r.peerStore.SetPeers(initializePeers); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
peers, err := r.peerStore.Peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If no peers are set in the config or there is one and we are it, then start as a single server.
|
||||
if len(peers) <= 1 {
|
||||
config.EnableSingleNode = true
|
||||
|
||||
// Ensure we can always become the leader
|
||||
config.DisableBootstrapAfterElect = false
|
||||
|
||||
// For single-node clusters, we can update the raft peers before we start the cluster
|
||||
// just in case the hostname has changed.
|
||||
if err := r.peerStore.SetPeers([]string{r.addr}); err != nil {
|
||||
return err
|
||||
}
|
||||
peers = []string{r.addr}
|
||||
}
|
||||
|
||||
// If we have multiple nodes in the cluster, make sure our address is in the raft peers or
|
||||
// we won't be able to boot into the cluster because the other peers will reject our new hostname. This
|
||||
// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
|
||||
// of the cluster before we can change them.
|
||||
if len(peers) > 0 && !raft.PeerContained(peers, r.addr) {
|
||||
r.logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", r.addr, r.path)
|
||||
return fmt.Errorf("peers out of sync: %v not in %v", r.addr, peers)
|
||||
}
|
||||
|
||||
// Create the log store and stable store.
|
||||
store, err := raftboltdb.NewBoltStore(filepath.Join(r.path, "raft.db"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("new bolt store: %s", err)
|
||||
}
|
||||
r.raftStore = store
|
||||
|
||||
// Create the snapshot store.
|
||||
snapshots, err := raft.NewFileSnapshotStore(r.path, raftSnapshotsRetained, os.Stderr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("file snapshot store: %s", err)
|
||||
}
|
||||
|
||||
// Create raft log.
|
||||
ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
|
||||
if err != nil {
|
||||
return fmt.Errorf("new raft: %s", err)
|
||||
}
|
||||
r.raft = ra
|
||||
|
||||
r.wg.Add(1)
|
||||
go r.logLeaderChanges()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *raftState) logLeaderChanges() {
|
||||
defer r.wg.Done()
|
||||
// Logs our current state (Node at 1.2.3.4:8088 [Follower])
|
||||
r.logger.Printf(r.raft.String())
|
||||
for {
|
||||
select {
|
||||
case <-r.closing:
|
||||
return
|
||||
case <-r.raft.LeaderCh():
|
||||
peers, err := r.peers()
|
||||
if err != nil {
|
||||
r.logger.Printf("failed to lookup peers: %v", err)
|
||||
}
|
||||
r.logger.Printf("%v. peers=%v", r.raft.String(), peers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *raftState) close() error {
|
||||
if r.closing != nil {
|
||||
close(r.closing)
|
||||
}
|
||||
r.wg.Wait()
|
||||
|
||||
if r.transport != nil {
|
||||
r.transport.Close()
|
||||
r.transport = nil
|
||||
}
|
||||
|
||||
// Shutdown raft.
|
||||
if r.raft != nil {
|
||||
if err := r.raft.Shutdown().Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
r.raft = nil
|
||||
}
|
||||
|
||||
if r.raftStore != nil {
|
||||
r.raftStore.Close()
|
||||
r.raftStore = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// apply applies a serialized command to the raft log.
|
||||
func (r *raftState) apply(b []byte) error {
|
||||
// Apply to raft log.
|
||||
f := r.raft.Apply(b, 0)
|
||||
if err := f.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Return response if it's an error.
|
||||
// No other non-nil objects should be returned.
|
||||
resp := f.Response()
|
||||
if err, ok := resp.(error); ok {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
panic(fmt.Sprintf("unexpected response: %#v", resp))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *raftState) lastIndex() uint64 {
|
||||
return r.raft.LastIndex()
|
||||
}
|
||||
|
||||
func (r *raftState) snapshot() error {
|
||||
future := r.raft.Snapshot()
|
||||
return future.Error()
|
||||
}
|
||||
|
||||
// addPeer adds addr to the list of peers in the cluster.
|
||||
func (r *raftState) addPeer(addr string) error {
|
||||
// peers, err := r.peerStore.Peers()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// peers = append(peers, addr)
|
||||
// if fut := r.raft.SetPeers(peers); fut.Error() != nil {
|
||||
// return fut.Error()
|
||||
// }
|
||||
|
||||
peers, err := r.peerStore.Peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, p := range peers {
|
||||
if addr == p {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if fut := r.raft.AddPeer(addr); fut.Error() != nil {
|
||||
return fut.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removePeer removes addr from the list of peers in the cluster.
|
||||
func (r *raftState) removePeer(addr string) error {
|
||||
// Only do this on the leader
|
||||
if !r.isLeader() {
|
||||
return errors.New("not the leader")
|
||||
}
|
||||
if fut := r.raft.RemovePeer(addr); fut.Error() != nil {
|
||||
return fut.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *raftState) peers() ([]string, error) {
|
||||
return r.peerStore.Peers()
|
||||
}
|
||||
|
||||
func (r *raftState) leader() string {
|
||||
if r.raft == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return r.raft.Leader()
|
||||
}
|
||||
|
||||
func (r *raftState) isLeader() bool {
|
||||
if r.raft == nil {
|
||||
return false
|
||||
}
|
||||
return r.raft.State() == raft.Leader
|
||||
}
|
||||
|
||||
// raftLayer wraps the connection so it can be re-used for forwarding.
|
||||
type raftLayer struct {
|
||||
addr *raftLayerAddr
|
||||
ln net.Listener
|
||||
conn chan net.Conn
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
type raftLayerAddr struct {
|
||||
addr string
|
||||
}
|
||||
|
||||
func (r *raftLayerAddr) Network() string {
|
||||
return "tcp"
|
||||
}
|
||||
|
||||
func (r *raftLayerAddr) String() string {
|
||||
return r.addr
|
||||
}
|
||||
|
||||
// newRaftLayer returns a new instance of raftLayer.
|
||||
func newRaftLayer(addr string, ln net.Listener) *raftLayer {
|
||||
return &raftLayer{
|
||||
addr: &raftLayerAddr{addr},
|
||||
ln: ln,
|
||||
conn: make(chan net.Conn),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Addr returns the local address for the layer.
|
||||
func (l *raftLayer) Addr() net.Addr {
|
||||
return l.addr
|
||||
}
|
||||
|
||||
// Dial creates a new network connection.
|
||||
func (l *raftLayer) Dial(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
conn, err := net.DialTimeout("tcp", addr, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Write a marker byte for raft messages.
|
||||
_, err = conn.Write([]byte{MuxHeader})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// Accept waits for the next connection.
|
||||
func (l *raftLayer) Accept() (net.Conn, error) { return l.ln.Accept() }
|
||||
|
||||
// Close closes the layer.
|
||||
func (l *raftLayer) Close() error { return l.ln.Close() }
|
|
@ -0,0 +1,161 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type loggingResponseWriter interface {
|
||||
http.ResponseWriter
|
||||
Status() int
|
||||
Size() int
|
||||
}
|
||||
|
||||
// responseLogger is a wrapper around http.ResponseWriter that keeps track of its HTTP status
|
||||
// code and body size
|
||||
type responseLogger struct {
|
||||
w http.ResponseWriter
|
||||
status int
|
||||
size int
|
||||
}
|
||||
|
||||
func (l *responseLogger) CloseNotify() <-chan bool {
|
||||
if notifier, ok := l.w.(http.CloseNotifier); ok {
|
||||
return notifier.CloseNotify()
|
||||
}
|
||||
// Needed for the response recorder used in testing, which does not implement CloseNotifier.
|
||||
return make(<-chan bool)
|
||||
}
|
||||
|
||||
func (l *responseLogger) Header() http.Header {
|
||||
return l.w.Header()
|
||||
}
|
||||
|
||||
func (l *responseLogger) Flush() {
|
||||
l.w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (l *responseLogger) Write(b []byte) (int, error) {
|
||||
if l.status == 0 {
|
||||
// Set status if WriteHeader has not been called
|
||||
l.status = http.StatusOK
|
||||
}
|
||||
size, err := l.w.Write(b)
|
||||
l.size += size
|
||||
return size, err
|
||||
}
|
||||
|
||||
func (l *responseLogger) WriteHeader(s int) {
|
||||
l.w.WriteHeader(s)
|
||||
l.status = s
|
||||
}
|
||||
|
||||
func (l *responseLogger) Status() int {
|
||||
if l.status == 0 {
|
||||
// This can happen if we never actually write data, but only set response headers.
|
||||
l.status = http.StatusOK
|
||||
}
|
||||
return l.status
|
||||
}
|
||||
|
||||
func (l *responseLogger) Size() int {
|
||||
return l.size
|
||||
}
|
||||
|
||||
// redactPassword redacts any occurrence of the password parameter 'p' in the request URL.
|
||||
func redactPassword(r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
if p := q.Get("p"); p != "" {
|
||||
q.Set("p", "[REDACTED]")
|
||||
r.URL.RawQuery = q.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
// Common Log Format: http://en.wikipedia.org/wiki/Common_Log_Format
|
||||
|
||||
// buildLogLine creates a log line in the Common Log Format;
|
||||
// in addition to the common fields, it appends the referrer, user agent, request ID, and request duration.
|
||||
func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string {
|
||||
|
||||
redactPassword(r)
|
||||
|
||||
username := parseUsername(r)
|
||||
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
|
||||
if err != nil {
|
||||
host = r.RemoteAddr
|
||||
}
|
||||
|
||||
uri := r.URL.RequestURI()
|
||||
|
||||
referer := r.Referer()
|
||||
|
||||
userAgent := r.UserAgent()
|
||||
|
||||
fields := []string{
|
||||
host,
|
||||
"-",
|
||||
detect(username, "-"),
|
||||
fmt.Sprintf("[%s]", start.Format("02/Jan/2006:15:04:05 -0700")),
|
||||
r.Method,
|
||||
uri,
|
||||
r.Proto,
|
||||
detect(strconv.Itoa(l.Status()), "-"),
|
||||
strconv.Itoa(l.Size()),
|
||||
detect(referer, "-"),
|
||||
detect(userAgent, "-"),
|
||||
r.Header.Get("Request-Id"),
|
||||
fmt.Sprintf("%s", time.Since(start)),
|
||||
}
|
||||
|
||||
return strings.Join(fields, " ")
|
||||
}
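
// Illustration only (not part of this diff): with the fields assembled above, a request is
// logged roughly like the following line (all values made up for the example):
//
//	10.0.0.1 - admin [21/Jan/2016:10:04:05 -0700] GET /lease?name=foo&nodeid=1 HTTP/1.1 200 87 - curl/7.43.0 8a5e3b2c-... 1.2ms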
|
||||
|
||||
// detect returns the first non-blank string among its arguments.
|
||||
func detect(values ...string) string {
|
||||
for _, v := range values {
|
||||
if v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseUsername parses the username from either the URL or the Authorization header
|
||||
func parseUsername(r *http.Request) string {
|
||||
var (
|
||||
username = ""
|
||||
url = r.URL
|
||||
)
|
||||
|
||||
// get username from the url if passed there
|
||||
if url.User != nil {
|
||||
if name := url.User.Username(); name != "" {
|
||||
username = name
|
||||
}
|
||||
}
|
||||
|
||||
// Try to get the username from the query param 'u'
|
||||
q := url.Query()
|
||||
if u := q.Get("u"); u != "" {
|
||||
username = u
|
||||
}
|
||||
|
||||
// Try to get it from the authorization header if set there
|
||||
if username == "" {
|
||||
if u, _, ok := r.BasicAuth(); ok {
|
||||
username = u
|
||||
}
|
||||
}
|
||||
return username
|
||||
}
|
||||
|
||||
// Sanitize passwords from query string for logging.
|
||||
func sanitize(r *http.Request, s string) {
|
||||
r.URL.RawQuery = strings.Replace(r.URL.RawQuery, s, "[REDACTED]", -1)
|
||||
}
|
|
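The handler that consumes these logging helpers lives in the suppressed handler diff, so the wiring is not shown here. As a rough sketch (the logging middleware name and its placement are assumptions, not part of the commit), the intended call pattern is: wrap the ResponseWriter in a responseLogger, serve the request, then emit one Common Log Format line via buildLogLine.

package meta

import (
	"log"
	"net/http"
	"time"
)

// logging is a hypothetical middleware illustrating how responseLogger and
// buildLogLine fit together; the real handler wiring is in the suppressed
// handler diff, not here.
func logging(inner http.Handler, weblog *log.Logger) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		l := &responseLogger{w: w}
		inner.ServeHTTP(l, r)
		weblog.Println(buildLogLine(l, r, start))
	})
}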
@ -0,0 +1,189 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
MuxHeader = 8
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
RaftListener net.Listener
|
||||
|
||||
config *Config
|
||||
handler *handler
|
||||
ln net.Listener
|
||||
httpAddr string
|
||||
raftAddr string
|
||||
https bool
|
||||
cert string
|
||||
err chan error
|
||||
Logger *log.Logger
|
||||
store *store
|
||||
}
|
||||
|
||||
// NewService returns a new instance of Service.
|
||||
func NewService(c *Config) *Service {
|
||||
s := &Service{
|
||||
config: c,
|
||||
httpAddr: c.DefaultedHTTPBindAddress(),
|
||||
raftAddr: c.DefaultedBindAddress(),
|
||||
https: c.HTTPSEnabled,
|
||||
cert: c.HTTPSCertificate,
|
||||
err: make(chan error),
|
||||
}
|
||||
if c.LoggingEnabled {
|
||||
s.Logger = log.New(os.Stderr, "[meta] ", log.LstdFlags)
|
||||
} else {
|
||||
s.Logger = log.New(ioutil.Discard, "", 0)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Open starts the service
|
||||
func (s *Service) Open() error {
|
||||
s.Logger.Println("Starting meta service")
|
||||
|
||||
if s.RaftListener == nil {
|
||||
panic("no raft listener set")
|
||||
}
|
||||
|
||||
// Open listener.
|
||||
if s.https {
|
||||
cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
listener, err := tls.Listen("tcp", s.httpAddr, &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Logger.Println("Listening on HTTPS:", listener.Addr().String())
|
||||
s.ln = listener
|
||||
} else {
|
||||
listener, err := net.Listen("tcp", s.httpAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Logger.Println("Listening on HTTP:", listener.Addr().String())
|
||||
s.ln = listener
|
||||
}
|
||||
|
||||
// wait for the listeners to start
|
||||
timeout := time.Now().Add(raftListenerStartupTimeout)
|
||||
for {
|
||||
if s.ln.Addr() != nil && s.RaftListener.Addr() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if time.Now().After(timeout) {
|
||||
return fmt.Errorf("unable to open without http listener running")
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
var err error
|
||||
if autoAssignPort(s.httpAddr) {
|
||||
s.httpAddr, err = combineHostAndAssignedPort(s.ln, s.httpAddr)
|
||||
}
|
||||
if autoAssignPort(s.raftAddr) {
|
||||
s.raftAddr, err = combineHostAndAssignedPort(s.RaftListener, s.raftAddr)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Open the store
|
||||
s.store = newStore(s.config, s.httpAddr, s.raftAddr)
|
||||
if err := s.store.open(s.RaftListener); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handler := newHandler(s.config, s)
|
||||
handler.logger = s.Logger
|
||||
handler.store = s.store
|
||||
s.handler = handler
|
||||
|
||||
// Begin listening for requests in a separate goroutine.
|
||||
go s.serve()
|
||||
return nil
|
||||
}
|
||||
|
||||
// serve serves the handler from the listener.
|
||||
func (s *Service) serve() {
|
||||
// The listener was closed so exit
|
||||
// See https://github.com/golang/go/issues/4373
|
||||
err := http.Serve(s.ln, s.handler)
|
||||
if err != nil && !strings.Contains(err.Error(), "closed") {
|
||||
s.err <- fmt.Errorf("listener failed: addr=%s, err=%s", s.ln.Addr(), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the underlying listener.
|
||||
func (s *Service) Close() error {
|
||||
if err := s.handler.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.store.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.ln != nil {
|
||||
if err := s.ln.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HTTPAddr returns the bind address for the HTTP API
|
||||
func (s *Service) HTTPAddr() string {
|
||||
return s.httpAddr
|
||||
}
|
||||
|
||||
// RaftAddr returns the bind address for the Raft TCP listener
|
||||
func (s *Service) RaftAddr() string {
|
||||
return s.raftAddr
|
||||
}
|
||||
|
||||
// Err returns a channel for fatal errors that occur on the listener.
|
||||
func (s *Service) Err() <-chan error { return s.err }
|
||||
|
||||
// SetLogger sets the internal logger to the logger passed in.
|
||||
func (s *Service) SetLogger(l *log.Logger) {
|
||||
s.Logger = l
|
||||
}
|
||||
|
||||
func autoAssignPort(addr string) bool {
|
||||
_, p, _ := net.SplitHostPort(addr)
|
||||
return p == "0"
|
||||
}
|
||||
|
||||
func combineHostAndAssignedPort(ln net.Listener, autoAddr string) (string, error) {
|
||||
host, _, err := net.SplitHostPort(autoAddr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, port, err := net.SplitHostPort(ln.Addr().String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return net.JoinHostPort(host, port), nil
|
||||
}
|
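The autoAssignPort/combineHostAndAssignedPort pair exists so that a configured bind address ending in ":0" keeps its host but adopts whatever port the OS assigned once the listener is open. A minimal sketch of that behavior (the portAssignmentSketch helper is illustrative only, not committed code):

package meta

import (
	"fmt"
	"net"
)

// portAssignmentSketch shows the auto-assignment path used by Open: listen on
// port 0, then combine the configured host with the port the OS actually chose.
func portAssignmentSketch() error {
	addr := "localhost:0"
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	defer ln.Close()

	if autoAssignPort(addr) {
		combined, err := combineHostAndAssignedPort(ln, addr)
		if err != nil {
			return err
		}
		fmt.Println(combined) // e.g. "localhost:54321"
	}
	return nil
}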
File diff suppressed because it is too large
|
@ -14,25 +14,24 @@ import (
|
|||
// StatementExecutor translates InfluxQL queries to meta store methods.
|
||||
type StatementExecutor struct {
|
||||
Store interface {
|
||||
Node(id uint64) (ni *NodeInfo, err error)
|
||||
Nodes() ([]NodeInfo, error)
|
||||
Peers() ([]string, error)
|
||||
Leader() string
|
||||
DataNode(id uint64) (ni *NodeInfo, err error)
|
||||
DataNodes() ([]NodeInfo, error)
|
||||
MetaNodes() ([]NodeInfo, error)
|
||||
DeleteDataNode(nodeID uint64) error
|
||||
DeleteMetaNode(nodeID uint64) error
|
||||
|
||||
DeleteNode(nodeID uint64, force bool) error
|
||||
Database(name string) (*DatabaseInfo, error)
|
||||
Databases() ([]DatabaseInfo, error)
|
||||
CreateDatabase(name string) (*DatabaseInfo, error)
|
||||
CreateDatabaseWithRetentionPolicy(name string, rpi *RetentionPolicyInfo) (*DatabaseInfo, error)
|
||||
DropDatabase(name string) error
|
||||
|
||||
DefaultRetentionPolicy(database string) (*RetentionPolicyInfo, error)
|
||||
CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) (*RetentionPolicyInfo, error)
|
||||
UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error
|
||||
SetDefaultRetentionPolicy(database, name string) error
|
||||
DropRetentionPolicy(database, name string) error
|
||||
|
||||
Users() ([]UserInfo, error)
|
||||
Users() []UserInfo
|
||||
CreateUser(name, password string, admin bool) (*UserInfo, error)
|
||||
UpdateUser(name, password string) error
|
||||
DropUser(name string) error
|
||||
|
@ -121,14 +120,16 @@ func (e *StatementExecutor) executeCreateDatabaseStatement(q *influxql.CreateDat
|
|||
} else {
|
||||
_, err = e.Store.CreateDatabase(q.Name)
|
||||
}
|
||||
if err == ErrDatabaseExists && q.IfNotExists {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
|
||||
func (e *StatementExecutor) executeDropDatabaseStatement(q *influxql.DropDatabaseStatement) *influxql.Result {
|
||||
if q.IfExists {
|
||||
if db, _ := e.Store.Database(q.Name); db == nil {
|
||||
return &influxql.Result{}
|
||||
}
|
||||
}
|
||||
return &influxql.Result{Err: e.Store.DropDatabase(q.Name)}
|
||||
}
|
||||
|
||||
|
@ -159,35 +160,39 @@ func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGr
|
|||
}
|
||||
|
||||
func (e *StatementExecutor) executeShowServersStatement(q *influxql.ShowServersStatement) *influxql.Result {
|
||||
nis, err := e.Store.Nodes()
|
||||
nis, err := e.Store.DataNodes()
|
||||
if err != nil {
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
|
||||
peers, err := e.Store.Peers()
|
||||
if err != nil {
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
|
||||
leader := e.Store.Leader()
|
||||
|
||||
row := &models.Row{Columns: []string{"id", "cluster_addr", "raft", "raft-leader"}}
|
||||
dataNodes := &models.Row{Columns: []string{"id", "http_addr", "tcp_addr"}}
|
||||
dataNodes.Name = "data_nodes"
|
||||
for _, ni := range nis {
|
||||
row.Values = append(row.Values, []interface{}{ni.ID, ni.Host, contains(peers, ni.Host), leader == ni.Host})
|
||||
dataNodes.Values = append(dataNodes.Values, []interface{}{ni.ID, ni.Host, ni.TCPHost})
|
||||
}
|
||||
return &influxql.Result{Series: []*models.Row{row}}
|
||||
|
||||
nis, err = e.Store.MetaNodes()
|
||||
if err != nil {
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
|
||||
metaNodes := &models.Row{Columns: []string{"id", "http_addr", "tcp_addr"}}
|
||||
metaNodes.Name = "meta_nodes"
|
||||
for _, ni := range nis {
|
||||
metaNodes.Values = append(metaNodes.Values, []interface{}{ni.ID, ni.Host, ni.TCPHost})
|
||||
}
|
||||
|
||||
return &influxql.Result{Series: []*models.Row{dataNodes, metaNodes}}
|
||||
}
|
||||
|
||||
func (e *StatementExecutor) executeDropServerStatement(q *influxql.DropServerStatement) *influxql.Result {
|
||||
ni, err := e.Store.Node(q.NodeID)
|
||||
if err != nil {
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
if ni == nil {
|
||||
return &influxql.Result{Err: ErrNodeNotFound}
|
||||
var err error
|
||||
if q.Meta {
|
||||
err = e.Store.DeleteMetaNode(q.NodeID)
|
||||
} else {
|
||||
err = e.Store.DeleteDataNode(q.NodeID)
|
||||
}
|
||||
|
||||
err = e.Store.DeleteNode(q.NodeID, q.Force)
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
|
||||
|
@ -205,10 +210,7 @@ func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStateme
|
|||
}
|
||||
|
||||
func (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) *influxql.Result {
|
||||
uis, err := e.Store.Users()
|
||||
if err != nil {
|
||||
return &influxql.Result{Err: err}
|
||||
}
|
||||
uis := e.Store.Users()
|
||||
|
||||
row := &models.Row{Columns: []string{"user", "admin"}}
|
||||
for _, ui := range uis {
|
|
@ -9,8 +9,8 @@ import (
|
|||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
// Ensure a CREATE DATABASE statement can be executed.
|
||||
|
@ -145,49 +145,6 @@ func TestStatementExecutor_ExecuteStatement_ShowServers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// Ensure a DROP SERVER statement can be executed.
|
||||
func TestStatementExecutor_ExecuteStatement_DropServer(t *testing.T) {
|
||||
e := NewStatementExecutor()
|
||||
e.Store.PeersFn = func() ([]string, error) {
|
||||
return []string{"node1"}, nil
|
||||
}
|
||||
|
||||
// Ensure non-existent nodes do not cause a problem.
|
||||
e.Store.NodeFn = func(id uint64) (*meta.NodeInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
if res := e.ExecuteStatement(influxql.MustParseStatement(`DROP SERVER 666`)); res.Err != meta.ErrNodeNotFound {
|
||||
t.Fatalf("unexpected error: %s", res.Err)
|
||||
}
|
||||
|
||||
// Make a node exist.
|
||||
e.Store.NodeFn = func(id uint64) (*meta.NodeInfo, error) {
|
||||
return &meta.NodeInfo{
|
||||
ID: 1, Host: "node1",
|
||||
}, nil
|
||||
}
|
||||
|
||||
e.Store.DeleteNodeFn = func(id uint64, force bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure Raft nodes can be dropped.
|
||||
if res := e.ExecuteStatement(influxql.MustParseStatement(`DROP SERVER 1`)); res.Err != nil {
|
||||
t.Fatalf("unexpected error: %s", res.Err)
|
||||
}
|
||||
|
||||
// Ensure non-Raft nodes can be dropped.
|
||||
e.Store.PeersFn = func() ([]string, error) {
|
||||
return []string{"node2"}, nil
|
||||
}
|
||||
e.Store.DeleteNodeFn = func(id uint64, force bool) error {
|
||||
return nil
|
||||
}
|
||||
if res := e.ExecuteStatement(influxql.MustParseStatement(`DROP SERVER 1`)); res.Err != nil {
|
||||
t.Fatalf("unexpected error: %s", res.Err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure a SHOW SERVERS statement returns errors from the store.
|
||||
func TestStatementExecutor_ExecuteStatement_ShowServers_Err(t *testing.T) {
|
||||
e := NewStatementExecutor()
|
||||
|
@ -1100,7 +1057,8 @@ type StatementExecutorStore struct {
|
|||
CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error)
|
||||
CreateDatabaseWithRetentionPolicyFn func(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error)
|
||||
DropDatabaseFn func(name string) error
|
||||
DeleteNodeFn func(nodeID uint64, force bool) error
|
||||
DeleteDataNodeFn func(nodeID uint64) error
|
||||
DeleteMetaNodeFn func(nodeID uint64) error
|
||||
DefaultRetentionPolicyFn func(database string) (*meta.RetentionPolicyInfo, error)
|
||||
CreateRetentionPolicyFn func(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
|
||||
UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate) error
|
||||
|
@ -1121,27 +1079,24 @@ type StatementExecutorStore struct {
|
|||
DropSubscriptionFn func(database, rp, name string) error
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) Node(id uint64) (*meta.NodeInfo, error) {
|
||||
func (s *StatementExecutorStore) DataNode(id uint64) (*meta.NodeInfo, error) {
|
||||
return s.NodeFn(id)
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) Nodes() ([]meta.NodeInfo, error) {
|
||||
func (s *StatementExecutorStore) DataNodes() ([]meta.NodeInfo, error) {
|
||||
return s.NodesFn()
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) Peers() ([]string, error) {
|
||||
return s.PeersFn()
|
||||
func (s *StatementExecutorStore) MetaNodes() ([]meta.NodeInfo, error) {
|
||||
return s.NodesFn()
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) Leader() string {
|
||||
if s.LeaderFn != nil {
|
||||
return s.LeaderFn()
|
||||
}
|
||||
return ""
|
||||
func (s *StatementExecutorStore) DeleteDataNode(nodeID uint64) error {
|
||||
return s.DeleteDataNodeFn(nodeID)
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) DeleteNode(nodeID uint64, force bool) error {
|
||||
return s.DeleteNodeFn(nodeID, force)
|
||||
func (s *StatementExecutorStore) DeleteMetaNode(nodeID uint64) error {
|
||||
return s.DeleteMetaNodeFn(nodeID)
|
||||
}
|
||||
|
||||
func (s *StatementExecutorStore) Database(name string) (*meta.DatabaseInfo, error) {
|
|
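The DROP SERVER path above now dispatches on q.Meta: meta nodes go through DeleteMetaNode and data nodes through DeleteDataNode. A hedged sketch of how a replacement test could exercise both branches against the mock store, assuming it sits in the same test file (same package and imports); the statement is built via the AST rather than the parser, since the exact DROP SERVER syntax for meta nodes is not part of this hunk.

func TestStatementExecutor_ExecuteStatement_DropServer_Sketch(t *testing.T) {
	e := NewStatementExecutor()
	e.Store.DeleteDataNodeFn = func(nodeID uint64) error { return nil }
	e.Store.DeleteMetaNodeFn = func(nodeID uint64) error { return nil }

	// Data node branch goes through DeleteDataNode.
	if res := e.ExecuteStatement(&influxql.DropServerStatement{NodeID: 1}); res.Err != nil {
		t.Fatalf("unexpected error: %s", res.Err)
	}

	// Meta node branch goes through DeleteMetaNode.
	if res := e.ExecuteStatement(&influxql.DropServerStatement{NodeID: 2, Meta: true}); res.Err != nil {
		t.Fatalf("unexpected error: %s", res.Err)
	}
}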
@ -0,0 +1,359 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/services/meta/internal"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/raft"
|
||||
)
|
||||
|
||||
// Retention policy settings.
|
||||
const (
|
||||
autoCreateRetentionPolicyName = "default"
|
||||
autoCreateRetentionPolicyPeriod = 0
|
||||
|
||||
// maxAutoCreatedRetentionPolicyReplicaN is the maximum replication factor that will
|
||||
// be set for auto-created retention policies.
|
||||
maxAutoCreatedRetentionPolicyReplicaN = 3
|
||||
)
|
||||
|
||||
// Raft configuration.
|
||||
const (
|
||||
raftListenerStartupTimeout = time.Second
|
||||
)
|
||||
|
||||
type store struct {
|
||||
mu sync.RWMutex
|
||||
closing chan struct{}
|
||||
|
||||
config *Config
|
||||
data *Data
|
||||
raftState *raftState
|
||||
dataChanged chan struct{}
|
||||
path string
|
||||
opened bool
|
||||
logger *log.Logger
|
||||
|
||||
// Authentication cache.
|
||||
authCache map[string]authUser
|
||||
|
||||
raftAddr string
|
||||
httpAddr string
|
||||
}
|
||||
|
||||
type authUser struct {
|
||||
salt []byte
|
||||
hash []byte
|
||||
}
|
||||
|
||||
// newStore will create a new metastore with the passed-in config
|
||||
func newStore(c *Config, httpAddr, raftAddr string) *store {
|
||||
s := store{
|
||||
data: &Data{
|
||||
Index: 1,
|
||||
},
|
||||
closing: make(chan struct{}),
|
||||
dataChanged: make(chan struct{}),
|
||||
path: c.Dir,
|
||||
config: c,
|
||||
httpAddr: httpAddr,
|
||||
raftAddr: raftAddr,
|
||||
}
|
||||
if c.LoggingEnabled {
|
||||
s.logger = log.New(os.Stderr, "[metastore] ", log.LstdFlags)
|
||||
} else {
|
||||
s.logger = log.New(ioutil.Discard, "", 0)
|
||||
}
|
||||
|
||||
return &s
|
||||
}
|
||||
|
||||
// open opens and initializes the raft store.
|
||||
func (s *store) open(raftln net.Listener) error {
|
||||
s.logger.Printf("Using data dir: %v", s.path)
|
||||
|
||||
// See if this server needs to join the raft consensus group
|
||||
var initializePeers []string
|
||||
if len(s.config.JoinPeers) > 0 {
|
||||
c := NewClient(s.config.JoinPeers, s.config.HTTPSEnabled)
|
||||
data := c.retryUntilSnapshot(0)
|
||||
for _, n := range data.MetaNodes {
|
||||
initializePeers = append(initializePeers, n.TCPHost)
|
||||
}
|
||||
initializePeers = append(initializePeers, s.raftAddr)
|
||||
}
|
||||
|
||||
if err := func() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Check if store has already been opened.
|
||||
if s.opened {
|
||||
return ErrStoreOpen
|
||||
}
|
||||
s.opened = true
|
||||
|
||||
// Create the root directory if it doesn't already exist.
|
||||
if err := os.MkdirAll(s.path, 0777); err != nil {
|
||||
return fmt.Errorf("mkdir all: %s", err)
|
||||
}
|
||||
|
||||
// Open the raft store.
|
||||
if err := s.openRaft(initializePeers, raftln); err != nil {
|
||||
return fmt.Errorf("raft: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(s.config.JoinPeers) > 0 {
|
||||
c := NewClient(s.config.JoinPeers, s.config.HTTPSEnabled)
|
||||
if err := c.Open(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
if err := c.JoinMetaServer(s.httpAddr, s.raftAddr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for a leader to be elected so we know the raft log is loaded
|
||||
// and up to date
|
||||
if err := s.waitForLeader(0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure this server is in the list of metanodes
|
||||
peers, err := s.raftState.peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(peers) <= 1 {
|
||||
// we have to loop here because if the hostname has changed
|
||||
// raft will take a little bit to normalize so that this host
|
||||
// will be marked as the leader
|
||||
for {
|
||||
err := s.setMetaNode(s.httpAddr, s.raftAddr)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) openRaft(initializePeers []string, raftln net.Listener) error {
|
||||
rs := newRaftState(s.config, s.raftAddr)
|
||||
rs.logger = s.logger
|
||||
rs.path = s.path
|
||||
|
||||
if err := rs.open(s, raftln, initializePeers); err != nil {
|
||||
return err
|
||||
}
|
||||
s.raftState = rs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.closing:
|
||||
// already closed
|
||||
return nil
|
||||
default:
|
||||
close(s.closing)
|
||||
return s.raftState.close()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *store) snapshot() (*Data, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.data.Clone(), nil
|
||||
}
|
||||
|
||||
// afterIndex returns a channel that will be closed to signal
|
||||
// the caller when an updated snapshot is available.
|
||||
func (s *store) afterIndex(index uint64) <-chan struct{} {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
if index < s.data.Index {
|
||||
// Client needs update so return a closed channel.
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
return s.dataChanged
|
||||
}
|
||||
|
||||
// WaitForLeader sleeps until a leader is found or a timeout occurs.
|
||||
// timeout == 0 means to wait forever.
|
||||
func (s *store) waitForLeader(timeout time.Duration) error {
|
||||
// Begin timeout timer.
|
||||
timer := time.NewTimer(timeout)
|
||||
defer timer.Stop()
|
||||
|
||||
// Continually check for leader until timeout.
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-s.closing:
|
||||
return errors.New("closing")
|
||||
case <-timer.C:
|
||||
if timeout != 0 {
|
||||
return errors.New("timeout")
|
||||
}
|
||||
case <-ticker.C:
|
||||
if s.leader() != "" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isLeader returns true if the store is currently the leader.
|
||||
func (s *store) isLeader() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.raftState == nil {
|
||||
return false
|
||||
}
|
||||
return s.raftState.raft.State() == raft.Leader
|
||||
}
|
||||
|
||||
// leader returns what the store thinks is the current leader. An empty
|
||||
// string indicates no leader exists.
|
||||
func (s *store) leader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.raftState == nil {
|
||||
return ""
|
||||
}
|
||||
return s.raftState.raft.Leader()
|
||||
}
|
||||
|
||||
// leaderHTTP returns the HTTP API connection info for the metanode
|
||||
// that is the raft leader
|
||||
func (s *store) leaderHTTP() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.raftState == nil {
|
||||
return ""
|
||||
}
|
||||
l := s.raftState.raft.Leader()
|
||||
|
||||
for _, n := range s.data.MetaNodes {
|
||||
if n.TCPHost == l {
|
||||
return n.Host
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// otherMetaServersHTTP will return the HTTP bind addresses of the other
|
||||
// meta servers in the cluster
|
||||
func (s *store) otherMetaServersHTTP() []string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
var a []string
|
||||
for _, n := range s.data.MetaNodes {
|
||||
if n.TCPHost != s.raftAddr {
|
||||
a = append(a, n.Host)
|
||||
}
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// index returns the current store index.
|
||||
func (s *store) index() uint64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.data.Index
|
||||
}
|
||||
|
||||
// apply applies a command to raft.
|
||||
func (s *store) apply(b []byte) error {
|
||||
return s.raftState.apply(b)
|
||||
}
|
||||
|
||||
// join adds a new server to the metaservice and raft
|
||||
func (s *store) join(n *NodeInfo) error {
|
||||
if err := s.raftState.addPeer(n.TCPHost); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.createMetaNode(n.Host, n.TCPHost)
|
||||
}
|
||||
|
||||
// leave removes a server from the metaservice and raft
|
||||
func (s *store) leave(n *NodeInfo) error {
|
||||
return s.raftState.removePeer(n.TCPHost)
|
||||
}
|
||||
|
||||
// createMetaNode is used by the join command to create the metanode in
|
||||
// the metastore
|
||||
func (s *store) createMetaNode(addr, raftAddr string) error {
|
||||
val := &internal.CreateMetaNodeCommand{
|
||||
HTTPAddr: proto.String(addr),
|
||||
TCPAddr: proto.String(raftAddr),
|
||||
Rand: proto.Uint64(uint64(rand.Int63())),
|
||||
}
|
||||
t := internal.Command_CreateMetaNodeCommand
|
||||
cmd := &internal.Command{Type: &t}
|
||||
if err := proto.SetExtension(cmd, internal.E_CreateMetaNodeCommand_Command, val); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b, err := proto.Marshal(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.apply(b)
|
||||
}
|
||||
|
||||
// setMetaNode is used when the raft group has only a single peer. It will
|
||||
// either create a metanode or update the information for the one metanode
|
||||
// that is there. It's used because hostnames can change
|
||||
func (s *store) setMetaNode(addr, raftAddr string) error {
|
||||
val := &internal.SetMetaNodeCommand{
|
||||
HTTPAddr: proto.String(addr),
|
||||
TCPAddr: proto.String(raftAddr),
|
||||
Rand: proto.Uint64(uint64(rand.Int63())),
|
||||
}
|
||||
t := internal.Command_SetMetaNodeCommand
|
||||
cmd := &internal.Command{Type: &t}
|
||||
if err := proto.SetExtension(cmd, internal.E_SetMetaNodeCommand_Command, val); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b, err := proto.Marshal(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.apply(b)
|
||||
}
|
|
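afterIndex is the store's change-notification primitive: it returns an already-closed channel when the caller's index is stale, and otherwise the dataChanged channel that the FSM closes on every applied command. A sketch of how a caller can block on it (the waitPastIndex helper and its timeout handling are assumptions, not committed code; it relies on the fmt and time imports already present in this file):

// waitPastIndex blocks until the store's data index moves past index, or
// until the timeout elapses. Illustrative helper built on afterIndex.
func waitPastIndex(s *store, index uint64, timeout time.Duration) error {
	select {
	case <-s.afterIndex(index):
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("timed out waiting for index > %d", index)
	}
}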
@ -0,0 +1,625 @@
|
|||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/influxdb/influxdb/influxql"
|
||||
"github.com/influxdb/influxdb/services/meta/internal"
|
||||
)
|
||||
|
||||
// storeFSM represents the finite state machine used by Store to interact with Raft.
|
||||
type storeFSM store
|
||||
|
||||
func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
|
||||
var cmd internal.Command
|
||||
if err := proto.Unmarshal(l.Data, &cmd); err != nil {
|
||||
panic(fmt.Errorf("cannot marshal command: %x", l.Data))
|
||||
}
|
||||
|
||||
// Lock the store.
|
||||
s := (*store)(fsm)
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
err := func() interface{} {
|
||||
switch cmd.GetType() {
|
||||
case internal.Command_RemovePeerCommand:
|
||||
return fsm.applyRemovePeerCommand(&cmd)
|
||||
case internal.Command_CreateNodeCommand:
|
||||
// create node was in < 0.10.0 servers, we need the peers
|
||||
// list to convert to the appropriate data/meta nodes now
|
||||
peers, err := s.raftState.peers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fsm.applyCreateNodeCommand(&cmd, peers)
|
||||
case internal.Command_DeleteNodeCommand:
|
||||
return fsm.applyDeleteNodeCommand(&cmd)
|
||||
case internal.Command_CreateDatabaseCommand:
|
||||
return fsm.applyCreateDatabaseCommand(&cmd)
|
||||
case internal.Command_DropDatabaseCommand:
|
||||
return fsm.applyDropDatabaseCommand(&cmd)
|
||||
case internal.Command_CreateRetentionPolicyCommand:
|
||||
return fsm.applyCreateRetentionPolicyCommand(&cmd)
|
||||
case internal.Command_DropRetentionPolicyCommand:
|
||||
return fsm.applyDropRetentionPolicyCommand(&cmd)
|
||||
case internal.Command_SetDefaultRetentionPolicyCommand:
|
||||
return fsm.applySetDefaultRetentionPolicyCommand(&cmd)
|
||||
case internal.Command_UpdateRetentionPolicyCommand:
|
||||
return fsm.applyUpdateRetentionPolicyCommand(&cmd)
|
||||
case internal.Command_CreateShardGroupCommand:
|
||||
return fsm.applyCreateShardGroupCommand(&cmd)
|
||||
case internal.Command_DeleteShardGroupCommand:
|
||||
return fsm.applyDeleteShardGroupCommand(&cmd)
|
||||
case internal.Command_CreateContinuousQueryCommand:
|
||||
return fsm.applyCreateContinuousQueryCommand(&cmd)
|
||||
case internal.Command_DropContinuousQueryCommand:
|
||||
return fsm.applyDropContinuousQueryCommand(&cmd)
|
||||
case internal.Command_CreateSubscriptionCommand:
|
||||
return fsm.applyCreateSubscriptionCommand(&cmd)
|
||||
case internal.Command_DropSubscriptionCommand:
|
||||
return fsm.applyDropSubscriptionCommand(&cmd)
|
||||
case internal.Command_CreateUserCommand:
|
||||
return fsm.applyCreateUserCommand(&cmd)
|
||||
case internal.Command_DropUserCommand:
|
||||
return fsm.applyDropUserCommand(&cmd)
|
||||
case internal.Command_UpdateUserCommand:
|
||||
return fsm.applyUpdateUserCommand(&cmd)
|
||||
case internal.Command_SetPrivilegeCommand:
|
||||
return fsm.applySetPrivilegeCommand(&cmd)
|
||||
case internal.Command_SetAdminPrivilegeCommand:
|
||||
return fsm.applySetAdminPrivilegeCommand(&cmd)
|
||||
case internal.Command_SetDataCommand:
|
||||
return fsm.applySetDataCommand(&cmd)
|
||||
case internal.Command_UpdateNodeCommand:
|
||||
return fsm.applyUpdateNodeCommand(&cmd)
|
||||
case internal.Command_CreateMetaNodeCommand:
|
||||
return fsm.applyCreateMetaNodeCommand(&cmd)
|
||||
case internal.Command_DeleteMetaNodeCommand:
|
||||
return fsm.applyDeleteMetaNodeCommand(&cmd, s)
|
||||
case internal.Command_SetMetaNodeCommand:
|
||||
return fsm.applySetMetaNodeCommand(&cmd)
|
||||
case internal.Command_CreateDataNodeCommand:
|
||||
return fsm.applyCreateDataNodeCommand(&cmd)
|
||||
case internal.Command_DeleteDataNodeCommand:
|
||||
return fsm.applyDeleteDataNodeCommand(&cmd)
|
||||
default:
|
||||
panic(fmt.Errorf("cannot apply command: %x", l.Data))
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy term and index to new metadata.
|
||||
fsm.data.Term = l.Term
|
||||
fsm.data.Index = l.Index
|
||||
|
||||
// signal that the data changed
|
||||
close(s.dataChanged)
|
||||
s.dataChanged = make(chan struct{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyRemovePeerCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_RemovePeerCommand_Command)
|
||||
v := ext.(*internal.RemovePeerCommand)
|
||||
|
||||
addr := v.GetAddr()
|
||||
|
||||
// Only do this if you are the leader
|
||||
if fsm.raftState.isLeader() {
|
||||
// Remove that node from the peers
|
||||
fsm.logger.Printf("removing peer: %s", addr)
|
||||
if err := fsm.raftState.removePeer(addr); err != nil {
|
||||
fsm.logger.Printf("error removing peer: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateNodeCommand(cmd *internal.Command, peers []string) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateNodeCommand_Command)
|
||||
v := ext.(*internal.CreateNodeCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
|
||||
// CreateNode is a command from < 0.10.0 clusters. Every node in
|
||||
// those clusters would be a data node and only the nodes that are
|
||||
// in the list of peers would be meta nodes
|
||||
isMeta := false
|
||||
for _, p := range peers {
|
||||
if v.GetHost() == p {
|
||||
isMeta = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isMeta {
|
||||
if err := other.CreateMetaNode(v.GetHost(), v.GetHost()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := other.CreateDataNode(v.GetHost(), v.GetHost()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the cluster ID hasn't been set then use the command's random number.
|
||||
if other.ClusterID == 0 {
|
||||
other.ClusterID = uint64(v.GetRand())
|
||||
}
|
||||
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyUpdateNodeCommand was used in < 0.10.0; it is a no-op now
|
||||
func (fsm *storeFSM) applyUpdateNodeCommand(cmd *internal.Command) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyUpdateDataNodeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateNodeCommand_Command)
|
||||
v := ext.(*internal.UpdateDataNodeCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
|
||||
node := other.DataNode(v.GetID())
|
||||
if node == nil {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
node.Host = v.GetHost()
|
||||
node.TCPHost = v.GetTCPHost()
|
||||
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyDeleteNodeCommand is from < 0.10.0; it is a no-op now
|
||||
func (fsm *storeFSM) applyDeleteNodeCommand(cmd *internal.Command) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateDatabaseCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateDatabaseCommand_Command)
|
||||
v := ext.(*internal.CreateDatabaseCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateDatabase(v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := (*store)(fsm)
|
||||
if s.config.RetentionAutoCreate {
|
||||
// Read node count.
|
||||
// Retention policies must be fully replicated.
|
||||
replicaN := len(other.DataNodes)
|
||||
if replicaN > maxAutoCreatedRetentionPolicyReplicaN {
|
||||
replicaN = maxAutoCreatedRetentionPolicyReplicaN
|
||||
} else if replicaN < 1 {
|
||||
replicaN = 1
|
||||
}
|
||||
|
||||
// Create a retention policy.
|
||||
rpi := NewRetentionPolicyInfo(autoCreateRetentionPolicyName)
|
||||
rpi.ReplicaN = replicaN
|
||||
rpi.Duration = autoCreateRetentionPolicyPeriod
|
||||
if err := other.CreateRetentionPolicy(v.GetName(), rpi); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set it as the default retention policy.
|
||||
if err := other.SetDefaultRetentionPolicy(v.GetName(), autoCreateRetentionPolicyName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDropDatabaseCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DropDatabaseCommand_Command)
|
||||
v := ext.(*internal.DropDatabaseCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DropDatabase(v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateRetentionPolicyCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateRetentionPolicyCommand_Command)
|
||||
v := ext.(*internal.CreateRetentionPolicyCommand)
|
||||
pb := v.GetRetentionPolicy()
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateRetentionPolicy(v.GetDatabase(),
|
||||
&RetentionPolicyInfo{
|
||||
Name: pb.GetName(),
|
||||
ReplicaN: int(pb.GetReplicaN()),
|
||||
Duration: time.Duration(pb.GetDuration()),
|
||||
ShardGroupDuration: time.Duration(pb.GetShardGroupDuration()),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDropRetentionPolicyCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DropRetentionPolicyCommand_Command)
|
||||
v := ext.(*internal.DropRetentionPolicyCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DropRetentionPolicy(v.GetDatabase(), v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applySetDefaultRetentionPolicyCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_SetDefaultRetentionPolicyCommand_Command)
|
||||
v := ext.(*internal.SetDefaultRetentionPolicyCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.SetDefaultRetentionPolicy(v.GetDatabase(), v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyUpdateRetentionPolicyCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_UpdateRetentionPolicyCommand_Command)
|
||||
v := ext.(*internal.UpdateRetentionPolicyCommand)
|
||||
|
||||
// Create update object.
|
||||
rpu := RetentionPolicyUpdate{Name: v.NewName}
|
||||
if v.Duration != nil {
|
||||
value := time.Duration(v.GetDuration())
|
||||
rpu.Duration = &value
|
||||
}
|
||||
if v.ReplicaN != nil {
|
||||
value := int(v.GetReplicaN())
|
||||
rpu.ReplicaN = &value
|
||||
}
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.UpdateRetentionPolicy(v.GetDatabase(), v.GetName(), &rpu); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateShardGroupCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateShardGroupCommand_Command)
|
||||
v := ext.(*internal.CreateShardGroupCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateShardGroup(v.GetDatabase(), v.GetPolicy(), time.Unix(0, v.GetTimestamp())); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDeleteShardGroupCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DeleteShardGroupCommand_Command)
|
||||
v := ext.(*internal.DeleteShardGroupCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DeleteShardGroup(v.GetDatabase(), v.GetPolicy(), v.GetShardGroupID()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateContinuousQueryCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateContinuousQueryCommand_Command)
|
||||
v := ext.(*internal.CreateContinuousQueryCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateContinuousQuery(v.GetDatabase(), v.GetName(), v.GetQuery()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDropContinuousQueryCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DropContinuousQueryCommand_Command)
|
||||
v := ext.(*internal.DropContinuousQueryCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DropContinuousQuery(v.GetDatabase(), v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateSubscriptionCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateSubscriptionCommand_Command)
|
||||
v := ext.(*internal.CreateSubscriptionCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateSubscription(v.GetDatabase(), v.GetRetentionPolicy(), v.GetName(), v.GetMode(), v.GetDestinations()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDropSubscriptionCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DropSubscriptionCommand_Command)
|
||||
v := ext.(*internal.DropSubscriptionCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DropSubscription(v.GetDatabase(), v.GetRetentionPolicy(), v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateUserCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateUserCommand_Command)
|
||||
v := ext.(*internal.CreateUserCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.CreateUser(v.GetName(), v.GetHash(), v.GetAdmin()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDropUserCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DropUserCommand_Command)
|
||||
v := ext.(*internal.DropUserCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.DropUser(v.GetName()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
delete(fsm.authCache, v.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyUpdateUserCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_UpdateUserCommand_Command)
|
||||
v := ext.(*internal.UpdateUserCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.UpdateUser(v.GetName(), v.GetHash()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
delete(fsm.authCache, v.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applySetPrivilegeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_SetPrivilegeCommand_Command)
|
||||
v := ext.(*internal.SetPrivilegeCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.SetPrivilege(v.GetUsername(), v.GetDatabase(), influxql.Privilege(v.GetPrivilege())); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applySetAdminPrivilegeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_SetAdminPrivilegeCommand_Command)
|
||||
v := ext.(*internal.SetAdminPrivilegeCommand)
|
||||
|
||||
// Copy data and update.
|
||||
other := fsm.data.Clone()
|
||||
if err := other.SetAdminPrivilege(v.GetUsername(), v.GetAdmin()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applySetDataCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_SetDataCommand_Command)
|
||||
v := ext.(*internal.SetDataCommand)
|
||||
|
||||
// Overwrite data.
|
||||
fsm.data = &Data{}
|
||||
fsm.data.unmarshal(v.GetData())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateMetaNodeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateMetaNodeCommand_Command)
|
||||
v := ext.(*internal.CreateMetaNodeCommand)
|
||||
|
||||
other := fsm.data.Clone()
|
||||
other.CreateMetaNode(v.GetHTTPAddr(), v.GetTCPAddr())
|
||||
|
||||
// If the cluster ID hasn't been set then use the command's random number.
|
||||
if other.ClusterID == 0 {
|
||||
other.ClusterID = uint64(v.GetRand())
|
||||
}
|
||||
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applySetMetaNodeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_SetMetaNodeCommand_Command)
|
||||
v := ext.(*internal.SetMetaNodeCommand)
|
||||
|
||||
other := fsm.data.Clone()
|
||||
other.SetMetaNode(v.GetHTTPAddr(), v.GetTCPAddr())
|
||||
|
||||
// If the cluster ID hasn't been set then use the command's random number.
|
||||
if other.ClusterID == 0 {
|
||||
other.ClusterID = uint64(v.GetRand())
|
||||
}
|
||||
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDeleteMetaNodeCommand(cmd *internal.Command, s *store) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DeleteMetaNodeCommand_Command)
|
||||
v := ext.(*internal.DeleteMetaNodeCommand)
|
||||
|
||||
other := fsm.data.Clone()
|
||||
node := other.MetaNode(v.GetID())
|
||||
if node == nil {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
if err := s.leave(node); err != nil && err != raft.ErrNotLeader {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := other.DeleteMetaNode(v.GetID()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyCreateDataNodeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_CreateDataNodeCommand_Command)
|
||||
v := ext.(*internal.CreateDataNodeCommand)
|
||||
|
||||
other := fsm.data.Clone()
|
||||
other.CreateDataNode(v.GetHTTPAddr(), v.GetTCPAddr())
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) applyDeleteDataNodeCommand(cmd *internal.Command) interface{} {
|
||||
ext, _ := proto.GetExtension(cmd, internal.E_DeleteDataNodeCommand_Command)
|
||||
v := ext.(*internal.DeleteDataNodeCommand)
|
||||
|
||||
other := fsm.data.Clone()
|
||||
node := other.DataNode(v.GetID())
|
||||
if node == nil {
|
||||
return ErrNodeNotFound
|
||||
}
|
||||
|
||||
if err := other.DeleteDataNode(v.GetID()); err != nil {
|
||||
return err
|
||||
}
|
||||
fsm.data = other
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) Snapshot() (raft.FSMSnapshot, error) {
|
||||
s := (*store)(fsm)
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return &storeFSMSnapshot{Data: (*store)(fsm).data}, nil
|
||||
}
|
||||
|
||||
func (fsm *storeFSM) Restore(r io.ReadCloser) error {
|
||||
// Read all bytes.
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode metadata.
|
||||
data := &Data{}
|
||||
if err := data.UnmarshalBinary(b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set metadata on store.
|
||||
// NOTE: No lock because Hashicorp Raft doesn't call Restore concurrently
|
||||
// with any other function.
|
||||
fsm.data = data
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type storeFSMSnapshot struct {
|
||||
Data *Data
|
||||
}
|
||||
|
||||
func (s *storeFSMSnapshot) Persist(sink raft.SnapshotSink) error {
|
||||
err := func() error {
|
||||
// Encode data.
|
||||
p, err := s.Data.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write data to sink.
|
||||
if _, err := sink.Write(p); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the sink.
|
||||
if err := sink.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
sink.Cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Release is invoked when we are finished with the snapshot
|
||||
func (s *storeFSMSnapshot) Release() {}
|
|
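Persist and Restore above round-trip the entire Data struct through MarshalBinary/UnmarshalBinary. A small sketch of that contract (illustrative helper only, not part of the commit):

package meta

// roundTripData mirrors the snapshot contract used by storeFSMSnapshot.Persist
// and storeFSM.Restore: serialize Data, then rebuild an equivalent copy.
func roundTripData(in *Data) (*Data, error) {
	b, err := in.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := &Data{}
	if err := out.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	return out, nil
}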
@ -18,8 +18,8 @@ import (
|
|||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
|
@ -65,9 +65,8 @@ type Service struct {
|
|||
PointsWriter interface {
|
||||
WritePoints(p *cluster.WritePointsRequest) error
|
||||
}
|
||||
MetaStore interface {
|
||||
WaitForLeader(d time.Duration) error
|
||||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
||||
MetaClient interface {
|
||||
CreateDatabase(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
|
||||
// Points received over the telnet protocol are batched.
|
||||
|
@ -119,12 +118,7 @@ func (s *Service) Open() error {
|
|||
tags := map[string]string{"bind": s.BindAddress}
|
||||
s.statMap = influxdb.NewStatistics(key, "opentsdb", tags)
|
||||
|
||||
if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
||||
s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.Database); err != nil {
|
||||
if _, err := s.MetaClient.CreateDatabase(s.Database); err != nil {
|
||||
s.Logger.Printf("Failed to ensure target database %s exists: %s", s.Database, err.Error())
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -13,8 +13,8 @@ import (
|
|||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/influxdb/influxdb/cluster"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/models"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/services/opentsdb"
|
||||
)
|
||||
|
||||
|
@ -137,7 +137,7 @@ func NewService(database string) *Service {
|
|||
})
|
||||
s := &Service{Service: srv}
|
||||
s.Service.PointsWriter = &s.PointsWriter
|
||||
s.Service.MetaStore = &DatabaseCreator{}
|
||||
s.Service.MetaClient = &DatabaseCreator{}
|
||||
|
||||
if !testing.Verbose() {
|
||||
s.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
|
||||
|
@ -158,10 +158,6 @@ func (w *PointsWriter) WritePoints(p *cluster.WritePointsRequest) error {
|
|||
type DatabaseCreator struct {
|
||||
}
|
||||
|
||||
func (d *DatabaseCreator) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
||||
func (d *DatabaseCreator) CreateDatabase(name string) (*meta.DatabaseInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *DatabaseCreator) WaitForLeader(t time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,8 +17,7 @@ type Service struct {
|
|||
done chan struct{}
|
||||
wg sync.WaitGroup
|
||||
|
||||
MetaStore interface {
|
||||
IsLeader() bool
|
||||
MetaClient interface {
|
||||
PrecreateShardGroups(now, cutoff time.Time) error
|
||||
}
|
||||
}
|
||||
|
@ -75,12 +74,6 @@ func (s *Service) runPrecreation() {
|
|||
for {
|
||||
select {
|
||||
case <-time.After(s.checkInterval):
|
||||
// Only run this on the leader, but always allow the loop to check
|
||||
// as the leader can change.
|
||||
if !s.MetaStore.IsLeader() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.precreate(time.Now().UTC()); err != nil {
|
||||
s.Logger.Printf("failed to precreate shards: %s", err.Error())
|
||||
}
|
||||
|
@ -94,7 +87,7 @@ func (s *Service) runPrecreation() {
|
|||
// precreate performs actual resource precreation.
|
||||
func (s *Service) precreate(now time.Time) error {
|
||||
cutoff := now.Add(s.advancePeriod).UTC()
|
||||
if err := s.MetaStore.PrecreateShardGroups(now, cutoff); err != nil {
|
||||
if err := s.MetaClient.PrecreateShardGroups(now, cutoff); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -17,7 +17,7 @@ func Test_ShardPrecreation(t *testing.T) {
|
|||
// A test meta client which returns 2 shard groups, only 1 of which requires a successor.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
ms := metaStore{
|
||||
ms := metaClient{
|
||||
PrecreateShardGroupsFn: func(v, u time.Time) error {
|
||||
wg.Done()
|
||||
if u != now.Add(advancePeriod) {
|
||||
|
@ -34,26 +34,22 @@ func Test_ShardPrecreation(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to create shard precreation service: %s", err.Error())
|
||||
}
|
||||
srv.MetaStore = ms
|
||||
srv.MetaClient = ms
|
||||
|
||||
err = srv.precreate(now)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to precreate shards: %s", err.Error())
|
||||
}
|
||||
|
||||
wg.Wait() // Ensure metastore test function is called.
|
||||
wg.Wait() // Ensure metaClient test function is called.
|
||||
return
|
||||
}
|
||||
|
||||
// metaClient represents a mock impl of the meta client.
|
||||
type metaStore struct {
|
||||
type metaClient struct {
|
||||
PrecreateShardGroupsFn func(now, cutoff time.Time) error
|
||||
}
|
||||
|
||||
func (m metaStore) IsLeader() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m metaStore) PrecreateShardGroups(now, cutoff time.Time) error {
|
||||
func (m metaClient) PrecreateShardGroups(now, cutoff time.Time) error {
|
||||
return m.PrecreateShardGroupsFn(now, cutoff)
|
||||
}
|
||||
|
|
|
@ -6,14 +6,13 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
)
|
||||
|
||||
// Service represents the retention policy enforcement service.
|
||||
type Service struct {
|
||||
MetaStore interface {
|
||||
IsLeader() bool
|
||||
VisitRetentionPolicies(f func(d meta.DatabaseInfo, r meta.RetentionPolicyInfo))
|
||||
MetaClient interface {
|
||||
Databases() ([]meta.DatabaseInfo, error)
|
||||
DeleteShardGroup(database, policy string, id uint64) error
|
||||
}
|
||||
TSDBStore interface {
|
||||
|
@ -71,24 +70,25 @@ func (s *Service) deleteShardGroups() {
|
|||
return
|
||||
|
||||
case <-ticker.C:
|
||||
// Only run this on the leader, but always allow the loop to check
|
||||
// as the leader can change.
|
||||
if !s.MetaStore.IsLeader() {
|
||||
dbs, err := s.MetaClient.Databases()
|
||||
if err != nil {
|
||||
s.logger.Printf("error getting databases: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
s.logger.Println("retention policy enforcement check commencing")
|
||||
|
||||
s.MetaStore.VisitRetentionPolicies(func(d meta.DatabaseInfo, r meta.RetentionPolicyInfo) {
|
||||
for _, g := range r.ExpiredShardGroups(time.Now().UTC()) {
|
||||
if err := s.MetaStore.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil {
|
||||
s.logger.Printf("failed to delete shard group %d from database %s, retention policy %s: %s",
|
||||
g.ID, d.Name, r.Name, err.Error())
|
||||
} else {
|
||||
s.logger.Printf("deleted shard group %d from database %s, retention policy %s",
|
||||
g.ID, d.Name, r.Name)
|
||||
for _, d := range dbs {
|
||||
for _, r := range d.RetentionPolicies {
|
||||
for _, g := range r.ExpiredShardGroups(time.Now().UTC()) {
|
||||
if err := s.MetaClient.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil {
|
||||
s.logger.Printf("failed to delete shard group %d from database %s, retention policy %s: %s",
|
||||
g.ID, d.Name, r.Name, err.Error())
|
||||
} else {
|
||||
s.logger.Printf("deleted shard group %d from database %s, retention policy %s",
|
||||
g.ID, d.Name, r.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -111,13 +111,19 @@ func (s *Service) deleteShards() {
|
|||
rp string
|
||||
}
|
||||
deletedShardIDs := make(map[uint64]deletionInfo, 0)
|
||||
s.MetaStore.VisitRetentionPolicies(func(d meta.DatabaseInfo, r meta.RetentionPolicyInfo) {
|
||||
for _, g := range r.DeletedShardGroups() {
|
||||
for _, sh := range g.Shards {
|
||||
deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name}
|
||||
dbs, err := s.MetaClient.Databases()
|
||||
if err != nil {
|
||||
s.logger.Printf("error getting databases: %s", err.Error())
|
||||
}
|
||||
for _, d := range dbs {
|
||||
for _, r := range d.RetentionPolicies {
|
||||
for _, g := range r.DeletedShardGroups() {
|
||||
for _, sh := range g.Shards {
|
||||
deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
for _, id := range s.TSDBStore.ShardIDs() {
|
||||
if di, ok := deletedShardIDs[id]; ok {
|
||||
|
|
|
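With the retention enforcement service now iterating Databases() instead of visiting retention policies through the old store, its MetaClient dependency shrinks to two methods. A sketch of a test double for the narrowed interface (the metaClient name and its placement in the retention package's tests are assumptions):

package retention

import "github.com/influxdb/influxdb/services/meta"

// metaClient is an illustrative test double satisfying the narrowed
// MetaClient interface used by the retention service.
type metaClient struct {
	DatabasesFn        func() ([]meta.DatabaseInfo, error)
	DeleteShardGroupFn func(database, policy string, id uint64) error
}

func (m metaClient) Databases() ([]meta.DatabaseInfo, error) { return m.DatabasesFn() }

func (m metaClient) DeleteShardGroup(database, policy string, id uint64) error {
	return m.DeleteShardGroupFn(database, policy, id)
}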
@ -12,7 +12,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/influxdb/influxdb"
|
||||
"github.com/influxdb/influxdb/meta"
|
||||
"github.com/influxdb/influxdb/services/meta"
|
||||
"github.com/influxdb/influxdb/tsdb"
|
||||
)
|
||||
|
||||
|
@ -24,7 +24,7 @@ type Service struct {
|
|||
wg sync.WaitGroup
|
||||
err chan error
|
||||
|
||||
MetaStore interface {
|
||||
MetaClient interface {
|
||||
encoding.BinaryMarshaler
|
||||
Database(name string) (*meta.DatabaseInfo, error)
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func (s *Service) handleConn(conn net.Conn) error {
|
|||
}
|
||||
case RequestMetastoreBackup:
|
||||
// Retrieve and serialize the current meta data.
|
||||
buf, err := s.MetaStore.MarshalBinary()
|
||||
buf, err := s.MetaClient.MarshalBinary()
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal meta: %s", err)
|
||||
}
|
||||
|
@ -132,7 +132,7 @@ func (s *Service) handleConn(conn net.Conn) error {
|
|||
// this server into the connection
|
||||
func (s *Service) writeDatabaseInfo(conn net.Conn, database string) error {
|
||||
res := Response{}
|
||||
db, err := s.MetaStore.Database(database)
|
||||
db, err := s.MetaClient.Database(database)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -169,7 +169,7 @@ func (s *Service) writeDatabaseInfo(conn net.Conn, database string) error {
|
|||
// this server into the connection
|
||||
func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPolicy string) error {
|
||||
res := Response{}
|
||||
db, err := s.MetaStore.Database(database)
|
||||
db, err := s.MetaClient.Database(database)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -11,7 +11,7 @@ import (

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
)

// Statistics for the Subscriber service.

@ -37,10 +37,10 @@ type subEntry struct {
// to defined third party destinations.
// Subscriptions are defined per database and retention policy.
type Service struct {
subs map[subEntry]PointsWriter
MetaStore interface {
subs map[subEntry]PointsWriter
MetaClient interface {
Databases() ([]meta.DatabaseInfo, error)
WaitForDataChanged() error
WaitForDataChanged() chan struct{}
}
NewPointsWriter func(u url.URL) (PointsWriter, error)
Logger *log.Logger
@ -48,6 +48,7 @@ type Service struct {
points chan *cluster.WritePointsRequest
wg sync.WaitGroup
closed bool
closing chan struct{}
mu sync.Mutex
}

@ -60,6 +61,7 @@ func NewService(c Config) *Service {
statMap: influxdb.NewStatistics("subscriber", "subscriber", nil),
points: make(chan *cluster.WritePointsRequest),
closed: true,
closing: make(chan struct{}),
}
}

@ -68,7 +70,9 @@ func (s *Service) Open() error {
s.mu.Lock()
defer s.mu.Unlock()

if s.MetaStore == nil {
s.closing = make(chan struct{})

if s.MetaClient == nil {
panic("no meta store")
}

@ -92,6 +96,13 @@ func (s *Service) Close() error {
defer s.mu.Unlock()
close(s.points)
s.closed = true
select {
case <-s.closing:
// do nothing
default:
close(s.closing)
}

s.wg.Wait()
s.Logger.Println("closed service")
return nil
@ -104,27 +115,28 @@ func (s *Service) SetLogger(l *log.Logger) {

func (s *Service) waitForMetaUpdates() {
for {
err := s.MetaStore.WaitForDataChanged()
if err != nil {
s.Logger.Printf("error while waiting for meta data changes, err: %v\n", err)
return
}
//Check that we haven't been closed before performing update.
s.mu.Lock()
if s.closed {
ch := s.MetaClient.WaitForDataChanged()
select {
case <-ch:
//Check that we haven't been closed before performing update.
s.mu.Lock()
if s.closed {
s.mu.Unlock()
s.Logger.Println("service closed not updating")
return
}
s.mu.Unlock()
s.Logger.Println("service closed not updating")
s.Update()
case <-s.closing:
return
}
s.mu.Unlock()
s.Update()
}
}

// Update will start new and stop deleted subscriptions.
func (s *Service) Update() error {
dbis, err := s.MetaStore.Databases()
dbis, err := s.MetaClient.Databases()
if err != nil {
return err
}
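The central change in this hunk is that WaitForDataChanged now returns a channel instead of blocking and returning an error, so the subscriber can select between a meta-data update and its own shutdown signal. A minimal sketch of that loop, assuming only the MetaClient interface and closing channel shown above (service and metaClient are illustrative names, and update stands in for the real (*Service).Update):

package sketch

import "log"

// metaClient is the narrowed dependency from the Service struct above.
type metaClient interface {
	WaitForDataChanged() chan struct{}
}

type service struct {
	MetaClient metaClient
	closing    chan struct{}
	update     func() // stands in for (*Service).Update
}

// waitForMetaUpdates loops until the service is closed, re-running update
// every time the meta client signals a change.
func (s *service) waitForMetaUpdates() {
	for {
		select {
		case <-s.MetaClient.WaitForDataChanged():
			s.update()
		case <-s.closing:
			log.Println("service closed, not updating")
			return
		}
	}
}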
@ -6,20 +6,20 @@ import (
"time"

"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/services/subscriber"
)

type MetaStore struct {
type MetaClient struct {
DatabasesFn func() ([]meta.DatabaseInfo, error)
WaitForDataChangedFn func() error
WaitForDataChangedFn func() chan struct{}
}

func (m MetaStore) Databases() ([]meta.DatabaseInfo, error) {
func (m MetaClient) Databases() ([]meta.DatabaseInfo, error) {
return m.DatabasesFn()
}

func (m MetaStore) WaitForDataChanged() error {
func (m MetaClient) WaitForDataChanged() chan struct{} {
return m.WaitForDataChangedFn()
}
@ -32,11 +32,10 @@ func (s Subscription) WritePoints(p *cluster.WritePointsRequest) error {
}

func TestService_IgnoreNonMatch(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
dataChanged := make(chan struct{})
ms := MetaClient{}
ms.WaitForDataChangedFn = func() chan struct{} {
return dataChanged
}
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
return []meta.DatabaseInfo{

@ -67,13 +66,13 @@ func TestService_IgnoreNonMatch(t *testing.T) {
}

s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
s.MetaClient = ms
s.NewPointsWriter = newPointsWriter
s.Open()
defer s.Close()

// Signal that data has changed
dataChanged <- true
dataChanged <- struct{}{}

for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
var u url.URL
@ -108,11 +107,10 @@ func TestService_IgnoreNonMatch(t *testing.T) {
}

func TestService_ModeALL(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
dataChanged := make(chan struct{})
ms := MetaClient{}
ms.WaitForDataChangedFn = func() chan struct{} {
return dataChanged
}
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
return []meta.DatabaseInfo{

@ -143,13 +141,13 @@ func TestService_ModeALL(t *testing.T) {
}

s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
s.MetaClient = ms
s.NewPointsWriter = newPointsWriter
s.Open()
defer s.Close()

// Signal that data has changed
dataChanged <- true
dataChanged <- struct{}{}

for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
var u url.URL
@ -187,11 +185,10 @@ func TestService_ModeALL(t *testing.T) {
}

func TestService_ModeANY(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
dataChanged := make(chan struct{})
ms := MetaClient{}
ms.WaitForDataChangedFn = func() chan struct{} {
return dataChanged
}
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
return []meta.DatabaseInfo{

@ -222,13 +219,13 @@ func TestService_ModeANY(t *testing.T) {
}

s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
s.MetaClient = ms
s.NewPointsWriter = newPointsWriter
s.Open()
defer s.Close()

// Signal that data has changed
dataChanged <- true
dataChanged <- struct{}{}

for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
var u url.URL
@ -270,11 +267,10 @@ func TestService_ModeANY(t *testing.T) {
}

func TestService_Multiple(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
dataChanged := make(chan struct{})
ms := MetaClient{}
ms.WaitForDataChangedFn = func() chan struct{} {
return dataChanged
}
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
return []meta.DatabaseInfo{

@ -311,13 +307,13 @@ func TestService_Multiple(t *testing.T) {
}

s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
s.MetaClient = ms
s.NewPointsWriter = newPointsWriter
s.Open()
defer s.Close()

// Signal that data has changed
dataChanged <- true
dataChanged <- struct{}{}

for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093", "udp://h2:9093", "udp://h3:9093"} {
var u url.URL
@ -389,11 +385,10 @@ func TestService_Multiple(t *testing.T) {
}

func TestService_WaitForDataChanged(t *testing.T) {
dataChanged := make(chan bool)
ms := MetaStore{}
ms.WaitForDataChangedFn = func() error {
<-dataChanged
return nil
dataChanged := make(chan struct{}, 1)
ms := MetaClient{}
ms.WaitForDataChangedFn = func() chan struct{} {
return dataChanged
}
calls := make(chan bool, 2)
ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {

@ -402,7 +397,7 @@ func TestService_WaitForDataChanged(t *testing.T) {
}

s := subscriber.NewService(subscriber.NewConfig())
s.MetaStore = ms
s.MetaClient = ms
// Explicitly closed below for testing
s.Open()

@ -420,7 +415,7 @@ func TestService_WaitForDataChanged(t *testing.T) {
}

// Signal that data has changed
dataChanged <- true
dataChanged <- struct{}{}

// Should be called once more after data changed
select {

@ -437,7 +432,7 @@ func TestService_WaitForDataChanged(t *testing.T) {

//Close service ensure not called
s.Close()
dataChanged <- true
dataChanged <- struct{}{}
select {
case <-calls:
t.Fatal("unexpected call")
@ -12,8 +12,8 @@ import (

"github.com/influxdb/influxdb"
"github.com/influxdb/influxdb/cluster"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@ -52,8 +52,8 @@ type Service struct {
WritePoints(p *cluster.WritePointsRequest) error
}

MetaStore interface {
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
MetaClient interface {
CreateDatabase(name string) (*meta.DatabaseInfo, error)
}

Logger *log.Logger

@ -87,7 +87,7 @@ func (s *Service) Open() (err error) {
return errors.New("database has to be specified in config")
}

if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.config.Database); err != nil {
if _, err := s.MetaClient.CreateDatabase(s.config.Database); err != nil {
return errors.New("Failed to ensure target database exists")
}
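This service now calls CreateDatabase instead of CreateDatabaseIfNotExists; the rename suggests the new meta client call is itself idempotent for databases that already exist. A small sketch of the Open-time check under that assumption (ensureDatabase and metaClient are illustrative names):

package sketch

import (
	"errors"

	"github.com/influxdb/influxdb/services/meta"
)

// metaClient is the single-method dependency from the Service struct above.
type metaClient interface {
	CreateDatabase(name string) (*meta.DatabaseInfo, error)
}

// ensureDatabase mirrors the Open path above: CreateDatabase is assumed to
// return the existing database rather than fail when it already exists.
func ensureDatabase(c metaClient, name string) error {
	if name == "" {
		return errors.New("database has to be specified in config")
	}
	if _, err := c.CreateDatabase(name); err != nil {
		return errors.New("Failed to ensure target database exists")
	}
	return nil
}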
tcp/mux.go

@ -18,6 +18,7 @@ const (
// Mux multiplexes a network connection.
type Mux struct {
mu sync.RWMutex
ln net.Listener
m map[byte]*listener

@ -41,6 +42,9 @@ func NewMux() *Mux {
// Serve handles connections from ln and multiplexes then across registered listener.
func (mux *Mux) Serve(ln net.Listener) error {
mux.mu.Lock()
mux.ln = ln
mux.mu.Unlock()
for {
// Wait for the next connection.
// If it returns a temporary error then simply retry.

@ -112,7 +116,8 @@ func (mux *Mux) Listen(header byte) net.Listener {
// Create a new listener and assign it.
ln := &listener{
c: make(chan net.Conn),
c: make(chan net.Conn),
mux: mux,
}
mux.m[header] = ln

@ -121,7 +126,8 @@ func (mux *Mux) Listen(header byte) net.Listener {
// listener is a receiver for connections received by Mux.
type listener struct {
c chan net.Conn
c chan net.Conn
mux *Mux
}

// Accept waits for and returns the next connection to the listener.

@ -136,8 +142,21 @@ func (ln *listener) Accept() (c net.Conn, err error) {
// Close is a no-op. The mux's listener should be closed instead.
func (ln *listener) Close() error { return nil }

// Addr always returns nil.
func (ln *listener) Addr() net.Addr { return nil }
// Addr returns the Addr of the listener
func (ln *listener) Addr() net.Addr {
if ln.mux == nil {
return nil
}

ln.mux.mu.RLock()
defer ln.mux.mu.RUnlock()

if ln.mux.ln == nil {
return nil
}

return ln.mux.ln.Addr()
}

// Dial connects to a remote mux listener with a given header byte.
func Dial(network, address string, header byte) (net.Conn, error) {
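The tcp/mux.go hunks give each header-specific listener a back-pointer to its Mux so Addr can report the address of the net.Listener handed to Serve instead of always returning nil. A usage sketch, assuming the package import path is github.com/influxdb/influxdb/tcp; note that Addr can still be nil until Serve has recorded the underlying listener, hence the short sleep.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/influxdb/influxdb/tcp"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	mux := tcp.NewMux()
	sub := mux.Listen(0x01) // connections whose first byte is 0x01
	go mux.Serve(ln)

	// Give Serve a moment to install the underlying listener; before this
	// change sub.Addr() always returned nil.
	time.Sleep(10 * time.Millisecond)
	fmt.Println(sub.Addr())
}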
@ -71,8 +71,9 @@ const (
)

type Config struct {
Dir string `toml:"dir"`
Engine string `toml:"engine"`
Enabled bool `toml:"enabled"`
Dir string `toml:"dir"`
Engine string `toml:"engine"`

// WAL config options for b1 (introduced in 0.9.2)
MaxWALSize int `toml:"max-wal-size"`

@ -104,6 +105,7 @@ type Config struct {
func NewConfig() Config {
return Config{
Engine: DefaultEngine,
Enabled: true, // data node enabled by default
MaxWALSize: DefaultMaxWALSize,
WALFlushInterval: toml.Duration(DefaultWALFlushInterval),
WALPartitionFlushDelay: toml.Duration(DefaultWALPartitionFlushDelay),
@ -0,0 +1,24 @@
package tsdb_test

import (
"testing"

"github.com/BurntSushi/toml"
"github.com/influxdb/influxdb/tsdb"
)

func TestConfig_Parse(t *testing.T) {
// Parse configuration.
var c tsdb.Config
if _, err := toml.Decode(`
enabled = false
`, &c); err != nil {
t.Fatal(err)
}

// Validate configuration.
if c.Enabled == true {
t.Fatalf("unexpected enabled: %v", c.Enabled)
}
// TODO: add remaining config tests
}
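The new Enabled flag defaults to true in NewConfig and is switched off via the toml tag exercised by the test above. A quick standalone check using only those two pieces from the diff:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/influxdb/influxdb/tsdb"
)

func main() {
	c := tsdb.NewConfig()
	fmt.Println(c.Enabled) // true: data node enabled by default

	if _, err := toml.Decode(`enabled = false`, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Enabled) // false after decoding the override
}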
@ -9,8 +9,8 @@ import (
"time"

"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
)

// QueryExecutor executes every statement in an influxdb Query. It is responsible for

@ -19,20 +19,15 @@ import (
// in a running process
type QueryExecutor struct {
// The meta store for accessing and updating cluster and schema data.
MetaStore interface {
MetaClient interface {
Database(name string) (*meta.DatabaseInfo, error)
Databases() ([]meta.DatabaseInfo, error)
User(name string) (*meta.UserInfo, error)
AdminUserExists() (bool, error)
AdminUserExists() bool
Authenticate(username, password string) (*meta.UserInfo, error)
RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
UserCount() (int, error)
UserCount() int
ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)
NodeID() uint64
}

// Executes statements relating to meta data.
MetaStatementExecutor interface {
ExecuteStatement(stmt influxql.Statement) *influxql.Result
}
@ -83,7 +78,7 @@ func (q *QueryExecutor) SetLogger(l *log.Logger) {
// a root user.
func (q *QueryExecutor) Authorize(u *meta.UserInfo, query *influxql.Query, database string) error {
// Special case if no users exist.
if count, err := q.MetaStore.UserCount(); count == 0 && err == nil {
if count := q.MetaClient.UserCount(); count == 0 {
// Ensure there is at least one statement.
if len(query.Statements) > 0 {
// First statement in the query must create a user with admin privilege.

@ -206,7 +201,7 @@ func (q *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chu
res = q.MonitorStatementExecutor.ExecuteStatement(stmt)
default:
// Delegate all other meta statements to a separate executor. They don't hit tsdb storage.
res = q.MetaStatementExecutor.ExecuteStatement(stmt)
res = q.MetaClient.ExecuteStatement(stmt)
}

if res != nil {

@ -258,7 +253,7 @@ func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int

// Build the set of target shards. Using shard IDs as keys ensures each shard ID
// occurs only once.
shardGroups, err := q.MetaStore.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)
shardGroups, err := q.MetaClient.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)
if err != nil {
return nil, err
}

@ -366,7 +361,7 @@ func (q *QueryExecutor) expandSources(sources influxql.Sources) (influxql.Source
// executeDropDatabaseStatement closes all local shards for the database and removes the directory. It then calls to the metastore to remove the database from there.
// TODO: make this work in a cluster/distributed
func (q *QueryExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) *influxql.Result {
dbi, err := q.MetaStore.Database(stmt.Name)
dbi, err := q.MetaClient.Database(stmt.Name)
if err != nil {
return &influxql.Result{Err: err}
} else if dbi == nil {

@ -387,7 +382,7 @@ func (q *QueryExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabase

// Remove database from meta-store first so that in-flight writes can complete without error, but new ones will
// be rejected.
res := q.MetaStatementExecutor.ExecuteStatement(stmt)
res := q.MetaClient.ExecuteStatement(stmt)

// Remove the database from the local store
err = q.Store.DeleteDatabase(stmt.Name, shardIDs)

@ -632,7 +627,7 @@ func (q *QueryExecutor) PlanShowMeasurements(stmt *influxql.ShowMeasurementsStat
}

// Get the database info.
di, err := q.MetaStore.Database(database)
di, err := q.MetaClient.Database(database)
if err != nil {
return nil, err
} else if di == nil {

@ -668,7 +663,7 @@ func (q *QueryExecutor) PlanShowTagKeys(stmt *influxql.ShowTagKeysStatement, dat
}

// Get the database info.
di, err := q.MetaStore.Database(database)
di, err := q.MetaClient.Database(database)
if err != nil {
return nil, err
} else if di == nil {

@ -993,7 +988,7 @@ func (q *QueryExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDat
}

// Find database.
di, err := q.MetaStore.Database(m.Database)
di, err := q.MetaClient.Database(m.Database)
if err != nil {
return err
} else if di == nil {
@ -10,8 +10,8 @@ import (
"time"

"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@ -348,12 +348,12 @@ func TestDropDatabase(t *testing.T) {
}

var name string
me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
name = stmt.(*influxql.DropDatabaseStatement).Name
return &influxql.Result{}
}}
executor.MetaStatementExecutor = me
executor.MetaClient = &testMetaClient{
ExecuteStatemenFn: func(stmt influxql.Statement) *influxql.Result {
name = stmt.(*influxql.DropDatabaseStatement).Name
return &influxql.Result{}
},
}
// verify the database is there on disk
dbPath := filepath.Join(store.Path(), "foo")
if _, err := os.Stat(dbPath); err != nil {
@ -412,8 +412,8 @@ func TestQueryNoData(t *testing.T) {
func TestAuthenticateIfUserCountZeroAndCreateUser(t *testing.T) {
store, executor := testStoreAndExecutor("")
defer os.RemoveAll(store.Path())
ms := &testMetastore{userCount: 0}
executor.MetaStore = ms
ms := &testMetaClient{userCount: 0}
executor.MetaClient = ms

if err := executor.Authorize(nil, mustParseQuery("create user foo with password 'asdf' with all privileges"), ""); err != nil {
t.Fatalf("should have authenticated if no users and attempting to create a user but got error: %s", err.Error())

@ -456,7 +456,7 @@ func testStoreAndExecutor(storePath string) (*tsdb.Store, *tsdb.QueryExecutor) {
store.CreateShard(database, retentionPolicy, shardID)

executor := tsdb.NewQueryExecutor(store)
executor.MetaStore = &testMetastore{}
executor.MetaClient = &testMetaClient{}
executor.ShardMapper = &testShardMapper{store: store}

return store, executor
@ -480,11 +480,12 @@ func executeAndGetJSON(query string, executor *tsdb.QueryExecutor) string {
return string(b)
}

type testMetastore struct {
userCount int
type testMetaClient struct {
userCount int
ExecuteStatemenFn func(stmt influxql.Statement) *influxql.Result
}

func (t *testMetastore) Database(name string) (*meta.DatabaseInfo, error) {
func (t *testMetaClient) Database(name string) (*meta.DatabaseInfo, error) {
return &meta.DatabaseInfo{
Name: name,
DefaultRetentionPolicy: "foo",

@ -509,20 +510,20 @@ func (t *testMetastore) Database(name string) (*meta.DatabaseInfo, error) {
}, nil
}

func (t *testMetastore) Databases() ([]meta.DatabaseInfo, error) {
func (t *testMetaClient) Databases() ([]meta.DatabaseInfo, error) {
db, _ := t.Database("foo")
return []meta.DatabaseInfo{*db}, nil
}

func (t *testMetastore) User(name string) (*meta.UserInfo, error) { return nil, nil }
func (t *testMetaClient) User(name string) (*meta.UserInfo, error) { return nil, nil }

func (t *testMetastore) AdminUserExists() (bool, error) { return false, nil }
func (t *testMetaClient) AdminUserExists() bool { return false }

func (t *testMetastore) Authenticate(username, password string) (*meta.UserInfo, error) {
func (t *testMetaClient) Authenticate(username, password string) (*meta.UserInfo, error) {
return nil, nil
}

func (t *testMetastore) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {
func (t *testMetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {
return &meta.RetentionPolicyInfo{
Name: "bar",
ShardGroups: []meta.ShardGroupInfo{

@ -541,11 +542,11 @@ func (t *testMetastore) RetentionPolicy(database, name string) (rpi *meta.Retent
}, nil
}

func (t *testMetastore) UserCount() (int, error) {
return t.userCount, nil
func (t *testMetaClient) UserCount() int {
return t.userCount
}

func (t *testMetastore) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
func (t *testMetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
return []meta.ShardGroupInfo{
{
ID: sgID,

@ -561,10 +562,17 @@ func (t *testMetastore) ShardGroupsByTimeRange(database, policy string, min, max
}, nil
}

func (t *testMetastore) NodeID() uint64 {
func (t *testMetaClient) NodeID() uint64 {
return 1
}

func (t *testMetaClient) ExecuteStatement(stmt influxql.Statement) *influxql.Result {
if t.ExecuteStatemenFn != nil {
return t.ExecuteStatemenFn(stmt)
}
return &influxql.Result{}
}

type testShardMapper struct {
store *tsdb.Store
}
@ -11,8 +11,8 @@ import (

"github.com/davecgh/go-spew/spew"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/influxdb/services/meta"
"github.com/influxdb/influxdb/tsdb"
)

@ -24,10 +24,10 @@ var nID = uint64(42)

// Simple test to ensure data can be read from two shards.
func TestWritePointsAndExecuteTwoShards(t *testing.T) {
// Create the mock planner and its metastore
// Create the mock planner and its MetaClient
store, query_executor := testStoreAndQueryExecutor()
defer os.RemoveAll(store.Path())
query_executor.MetaStore = &testQEMetastore{
query_executor.MetaClient = &testQEMetaClient{
sgFunc: func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
return []meta.ShardGroupInfo{
{

@ -155,10 +155,10 @@ func TestWritePointsAndExecuteTwoShards(t *testing.T) {

// Test that executor correctly orders data across shards.
func TestWritePointsAndExecuteTwoShardsAlign(t *testing.T) {
// Create the mock planner and its metastore
// Create the mock planner and its MetaClient
store, query_executor := testStoreAndQueryExecutor()
defer os.RemoveAll(store.Path())
query_executor.MetaStore = &testQEMetastore{
query_executor.MetaClient = &testQEMetaClient{
sgFunc: func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
return []meta.ShardGroupInfo{
{

@ -331,10 +331,10 @@ func TestWritePointsAndExecuteTwoShardsQueryRewrite(t *testing.T) {
// Test that executor correctly orders data across shards when the tagsets
// are not presented in alphabetically order across shards.
func TestWritePointsAndExecuteTwoShardsTagSetOrdering(t *testing.T) {
// Create the mock planner and its metastore
// Create the mock planner and its MetaClient
store, query_executor := testStoreAndQueryExecutor()
defer os.RemoveAll(store.Path())
query_executor.MetaStore = &testQEMetastore{
query_executor.MetaClient = &testQEMetaClient{
sgFunc: func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
return []meta.ShardGroupInfo{
{
@ -1006,27 +1006,31 @@ func TestRawQueryDerivative_Process_Bool(t *testing.T) {
}
}

type testQEMetastore struct {
type testQEMetaClient struct {
sgFunc func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)
}

func (t *testQEMetastore) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
func (t *testQEMetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {
return t.sgFunc(database, policy, min, max)
}

func (t *testQEMetastore) Database(name string) (*meta.DatabaseInfo, error) { return nil, nil }
func (t *testQEMetastore) Databases() ([]meta.DatabaseInfo, error) { return nil, nil }
func (t *testQEMetastore) User(name string) (*meta.UserInfo, error) { return nil, nil }
func (t *testQEMetastore) AdminUserExists() (bool, error) { return false, nil }
func (t *testQEMetastore) Authenticate(username, password string) (*meta.UserInfo, error) {
func (t *testQEMetaClient) Database(name string) (*meta.DatabaseInfo, error) { return nil, nil }
func (t *testQEMetaClient) Databases() ([]meta.DatabaseInfo, error) { return nil, nil }
func (t *testQEMetaClient) User(name string) (*meta.UserInfo, error) { return nil, nil }
func (t *testQEMetaClient) AdminUserExists() bool { return false }
func (t *testQEMetaClient) Authenticate(username, password string) (*meta.UserInfo, error) {
return nil, nil
}
func (t *testQEMetastore) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {
func (t *testQEMetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {
return nil, nil
}
func (t *testQEMetastore) UserCount() (int, error) { return 0, nil }
func (t *testQEMetaClient) UserCount() int { return 0 }

func (t *testQEMetastore) NodeID() uint64 { return nID }
func (t *testQEMetaClient) NodeID() uint64 { return nID }

func (t *testQEMetaClient) ExecuteStatement(stmt influxql.Statement) *influxql.Result {
return &influxql.Result{}
}

func testStore() *tsdb.Store {
path, _ := ioutil.TempDir("", "")