Update from Master

pull/583/head
Alex P 2016-11-29 13:11:46 -08:00
commit 17743326eb
102 changed files with 2220 additions and 2136 deletions

View File

@@ -1,5 +1,7 @@
 ## v1.1.0 [unreleased]
+- #586: Allow telegraf database in non-default locations
+- #576: Fix broken zoom on graphs that aren't the first.
 - #575: Add Varnish Layout
 - #574: Fix broken graphs on Postgres Layouts by adding aggregates.

View File

@@ -19,6 +19,7 @@ type Client struct {
 	SourcesStore *SourcesStore
 	ServersStore *ServersStore
 	LayoutStore  *LayoutStore
+	UsersStore   *UsersStore
 	AlertsStore  *AlertsStore
 }
@@ -28,6 +29,7 @@ func NewClient() *Client {
 	c.SourcesStore = &SourcesStore{client: c}
 	c.ServersStore = &ServersStore{client: c}
 	c.AlertsStore = &AlertsStore{client: c}
+	c.UsersStore = &UsersStore{client: c}
 	c.LayoutStore = &LayoutStore{
 		client: c,
 		IDs:    &uuid.V4{},
@@ -65,6 +67,10 @@ func (c *Client) Open() error {
 		if _, err := tx.CreateBucketIfNotExists(AlertsBucket); err != nil {
 			return err
 		}
+		// Always create Users bucket.
+		if _, err := tx.CreateBucketIfNotExists(UsersBucket); err != nil {
+			return err
+		}
 		return nil
 	}); err != nil {
 		return err
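
Because CreateBucketIfNotExists is idempotent, the new block is safe to run on every startup, including against databases created before the Users bucket existed. A minimal standalone sketch of the same pattern (file name here is illustrative, not Chronograf's configured path):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("chronograf-v1.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		// Creates the bucket on first run, no-op on every run after,
		// so existing databases are migrated transparently on Open.
		_, err := tx.CreateBucketIfNotExists([]byte("Users"))
		return err
	}); err != nil {
		log.Fatal(err)
	}
}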

View File

@@ -51,6 +51,7 @@ func MarshalSource(s chronograf.Source) ([]byte, error) {
 		Password: s.Password,
 		URL:      s.URL,
 		Default:  s.Default,
+		Telegraf: s.Telegraf,
 	})
 }
@@ -68,6 +69,7 @@ func UnmarshalSource(data []byte, s *chronograf.Source) error {
 	s.Password = pb.Password
 	s.URL = pb.URL
 	s.Default = pb.Default
+	s.Telegraf = pb.Telegraf
 	return nil
 }
@@ -203,3 +205,23 @@ func UnmarshalAlertRule(data []byte, r *ScopedAlert) error {
 	r.KapaID = int(pb.KapaID)
 	return nil
 }
+
+// MarshalUser encodes a user to binary protobuf format.
+func MarshalUser(u *chronograf.User) ([]byte, error) {
+	return proto.Marshal(&User{
+		ID:    uint64(u.ID),
+		Email: u.Email,
+	})
+}
+
+// UnmarshalUser decodes a user from binary protobuf data.
+func UnmarshalUser(data []byte, u *chronograf.User) error {
+	var pb User
+	if err := proto.Unmarshal(data, &pb); err != nil {
+		return err
+	}
+	u.ID = chronograf.UserID(pb.ID)
+	u.Email = pb.Email
+	return nil
+}
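
The pair is a straight round trip: MarshalUser widens the ID to uint64 for the wire, UnmarshalUser narrows it back to chronograf.UserID. A quick sketch using only the functions added above (the email value is invented):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/chronograf"
	"github.com/influxdata/chronograf/bolt/internal"
)

func main() {
	u := chronograf.User{ID: 1337, Email: "docbrown@example.com"}

	buf, err := internal.MarshalUser(&u)
	if err != nil {
		log.Fatal(err)
	}

	var got chronograf.User
	if err := internal.UnmarshalUser(buf, &got); err != nil {
		log.Fatal(err)
	}
	// The ID survives the uint64 <-> UserID conversion and Email is
	// copied verbatim.
	fmt.Println(got.ID == u.ID, got.Email == u.Email) // true true
}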

View File

@@ -16,6 +16,7 @@ It has these top-level messages:
 	Cell
 	Query
 	AlertRule
+	User
 */
 package internal
@@ -35,13 +36,13 @@ var _ = math.Inf
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type Exploration struct {
-	ID        int64  `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-	Name      string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
-	UserID    int64  `protobuf:"varint,3,opt,name=UserID,json=userID,proto3" json:"UserID,omitempty"`
-	Data      string `protobuf:"bytes,4,opt,name=Data,json=data,proto3" json:"Data,omitempty"`
-	CreatedAt int64  `protobuf:"varint,5,opt,name=CreatedAt,json=createdAt,proto3" json:"CreatedAt,omitempty"`
-	UpdatedAt int64  `protobuf:"varint,6,opt,name=UpdatedAt,json=updatedAt,proto3" json:"UpdatedAt,omitempty"`
-	Default   bool   `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"`
+	ID        int64  `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Name      string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
+	UserID    int64  `protobuf:"varint,3,opt,name=UserID,proto3" json:"UserID,omitempty"`
+	Data      string `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
+	CreatedAt int64  `protobuf:"varint,5,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"`
+	UpdatedAt int64  `protobuf:"varint,6,opt,name=UpdatedAt,proto3" json:"UpdatedAt,omitempty"`
+	Default   bool   `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"`
 }
 
 func (m *Exploration) Reset() { *m = Exploration{} }
@@ -50,13 +51,14 @@ func (*Exploration) ProtoMessage() {}
 func (*Exploration) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} }
 
 type Source struct {
-	ID       int64  `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-	Name     string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
-	Type     string `protobuf:"bytes,3,opt,name=Type,json=type,proto3" json:"Type,omitempty"`
-	Username string `protobuf:"bytes,4,opt,name=Username,json=username,proto3" json:"Username,omitempty"`
-	Password string `protobuf:"bytes,5,opt,name=Password,json=password,proto3" json:"Password,omitempty"`
-	URL      string `protobuf:"bytes,6,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"`
-	Default  bool   `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"`
+	ID       int64  `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Name     string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
+	Type     string `protobuf:"bytes,3,opt,name=Type,proto3" json:"Type,omitempty"`
+	Username string `protobuf:"bytes,4,opt,name=Username,proto3" json:"Username,omitempty"`
+	Password string `protobuf:"bytes,5,opt,name=Password,proto3" json:"Password,omitempty"`
+	URL      string `protobuf:"bytes,6,opt,name=URL,proto3" json:"URL,omitempty"`
+	Default  bool   `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"`
+	Telegraf string `protobuf:"bytes,8,opt,name=Telegraf,proto3" json:"Telegraf,omitempty"`
 }
 
 func (m *Source) Reset() { *m = Source{} }
@@ -65,12 +67,12 @@ func (*Source) ProtoMessage() {}
 func (*Source) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} }
 
 type Server struct {
-	ID       int64  `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-	Name     string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
-	Username string `protobuf:"bytes,3,opt,name=Username,json=username,proto3" json:"Username,omitempty"`
-	Password string `protobuf:"bytes,4,opt,name=Password,json=password,proto3" json:"Password,omitempty"`
-	URL      string `protobuf:"bytes,5,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"`
-	SrcID    int64  `protobuf:"varint,6,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"`
+	ID       int64  `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Name     string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
+	Username string `protobuf:"bytes,3,opt,name=Username,proto3" json:"Username,omitempty"`
+	Password string `protobuf:"bytes,4,opt,name=Password,proto3" json:"Password,omitempty"`
+	URL      string `protobuf:"bytes,5,opt,name=URL,proto3" json:"URL,omitempty"`
+	SrcID    int64  `protobuf:"varint,6,opt,name=SrcID,proto3" json:"SrcID,omitempty"`
 }
 
 func (m *Server) Reset() { *m = Server{} }
@@ -79,10 +81,10 @@ func (*Server) ProtoMessage() {}
 func (*Server) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} }
 
 type Layout struct {
-	ID          string  `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-	Application string  `protobuf:"bytes,2,opt,name=Application,json=application,proto3" json:"Application,omitempty"`
-	Measurement string  `protobuf:"bytes,3,opt,name=Measurement,json=measurement,proto3" json:"Measurement,omitempty"`
-	Cells       []*Cell `protobuf:"bytes,4,rep,name=Cells,json=cells" json:"Cells,omitempty"`
+	ID          string  `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Application string  `protobuf:"bytes,2,opt,name=Application,proto3" json:"Application,omitempty"`
+	Measurement string  `protobuf:"bytes,3,opt,name=Measurement,proto3" json:"Measurement,omitempty"`
+	Cells       []*Cell `protobuf:"bytes,4,rep,name=Cells" json:"Cells,omitempty"`
 }
 
 func (m *Layout) Reset() { *m = Layout{} }
@@ -120,11 +122,11 @@ func (m *Cell) GetQueries() []*Query {
 }
 
 type Query struct {
-	Command  string   `protobuf:"bytes,1,opt,name=Command,json=command,proto3" json:"Command,omitempty"`
-	DB       string   `protobuf:"bytes,2,opt,name=DB,json=dB,proto3" json:"DB,omitempty"`
-	RP       string   `protobuf:"bytes,3,opt,name=RP,json=rP,proto3" json:"RP,omitempty"`
-	GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys,json=groupBys" json:"GroupBys,omitempty"`
-	Wheres   []string `protobuf:"bytes,5,rep,name=Wheres,json=wheres" json:"Wheres,omitempty"`
+	Command  string   `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"`
+	DB       string   `protobuf:"bytes,2,opt,name=DB,proto3" json:"DB,omitempty"`
+	RP       string   `protobuf:"bytes,3,opt,name=RP,proto3" json:"RP,omitempty"`
+	GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys" json:"GroupBys,omitempty"`
+	Wheres   []string `protobuf:"bytes,5,rep,name=Wheres" json:"Wheres,omitempty"`
 }
 
 func (m *Query) Reset() { *m = Query{} }
@@ -133,10 +135,10 @@ func (*Query) ProtoMessage() {}
 func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} }
 
 type AlertRule struct {
-	ID     string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
-	JSON   string `protobuf:"bytes,2,opt,name=JSON,json=jSON,proto3" json:"JSON,omitempty"`
-	SrcID  int64  `protobuf:"varint,3,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"`
-	KapaID int64  `protobuf:"varint,4,opt,name=KapaID,json=kapaID,proto3" json:"KapaID,omitempty"`
+	ID     string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	JSON   string `protobuf:"bytes,2,opt,name=JSON,proto3" json:"JSON,omitempty"`
+	SrcID  int64  `protobuf:"varint,3,opt,name=SrcID,proto3" json:"SrcID,omitempty"`
+	KapaID int64  `protobuf:"varint,4,opt,name=KapaID,proto3" json:"KapaID,omitempty"`
 }
 
 func (m *AlertRule) Reset() { *m = AlertRule{} }
@@ -144,6 +146,16 @@ func (m *AlertRule) String() string { return proto.CompactTextString(
 func (*AlertRule) ProtoMessage() {}
 func (*AlertRule) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} }
 
+type User struct {
+	ID    uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	Email string `protobuf:"bytes,2,opt,name=Email,proto3" json:"Email,omitempty"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} }
+
 func init() {
 	proto.RegisterType((*Exploration)(nil), "internal.Exploration")
 	proto.RegisterType((*Source)(nil), "internal.Source")
@@ -152,44 +164,45 @@ func init() {
 	proto.RegisterType((*Cell)(nil), "internal.Cell")
 	proto.RegisterType((*Query)(nil), "internal.Query")
 	proto.RegisterType((*AlertRule)(nil), "internal.AlertRule")
+	proto.RegisterType((*User)(nil), "internal.User")
 }
 
 func init() { proto.RegisterFile("internal.proto", fileDescriptorInternal) }
 
 var fileDescriptorInternal = []byte{
-	// 529 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x93, 0x4d, 0xae, 0xd3, 0x30,
-	0x10, 0x80, 0xe5, 0x26, 0xce, 0x8f, 0x8b, 0x0a, 0xb2, 0x10, 0x8a, 0x10, 0x8b, 0x2a, 0x62, 0x51,
-	0x36, 0x6f, 0x01, 0x27, 0x68, 0x1b, 0x84, 0x0a, 0xa5, 0xaf, 0xb8, 0x54, 0xac, 0x58, 0x98, 0xc4,
-	0xd0, 0x40, 0xfe, 0x70, 0x6c, 0xda, 0x6c, 0xd9, 0xc2, 0x31, 0xb8, 0x01, 0x17, 0x44, 0xe3, 0x3a,
-	0xa4, 0x12, 0xe8, 0xe9, 0x2d, 0xbf, 0x99, 0x49, 0xfc, 0xcd, 0x8c, 0x4d, 0x26, 0x79, 0xa5, 0x84,
-	0xac, 0x78, 0x71, 0xd5, 0xc8, 0x5a, 0xd5, 0x34, 0xe8, 0x39, 0xfe, 0x8d, 0xc8, 0xf8, 0xf9, 0xa9,
-	0x29, 0x6a, 0xc9, 0x55, 0x5e, 0x57, 0x74, 0x42, 0x46, 0xab, 0x24, 0x42, 0x53, 0x34, 0x73, 0xd8,
-	0x28, 0x4f, 0x28, 0x25, 0xee, 0x86, 0x97, 0x22, 0x1a, 0x4d, 0xd1, 0x2c, 0x64, 0x6e, 0xc5, 0x4b,
-	0x41, 0x1f, 0x10, 0x6f, 0xdf, 0x0a, 0xb9, 0x4a, 0x22, 0xc7, 0xd4, 0x79, 0xda, 0x10, 0xd4, 0x26,
-	0x5c, 0xf1, 0xc8, 0x3d, 0xd7, 0x66, 0x5c, 0x71, 0xfa, 0x88, 0x84, 0x4b, 0x29, 0xb8, 0x12, 0xd9,
-	0x5c, 0x45, 0xd8, 0x94, 0x87, 0x69, 0x1f, 0x80, 0xec, 0xbe, 0xc9, 0x6c, 0xd6, 0x3b, 0x67, 0x75,
-	0x1f, 0xa0, 0x11, 0xf1, 0x13, 0xf1, 0x91, 0xeb, 0x42, 0x45, 0xfe, 0x14, 0xcd, 0x02, 0xe6, 0x67,
-	0x67, 0x8c, 0x7f, 0x21, 0xe2, 0xed, 0x6a, 0x2d, 0x53, 0x71, 0x2b, 0x61, 0x4a, 0xdc, 0xb7, 0x5d,
-	0x23, 0x8c, 0x6e, 0xc8, 0x5c, 0xd5, 0x35, 0x82, 0x3e, 0x24, 0x01, 0x34, 0x01, 0x79, 0x2b, 0x1c,
-	0x68, 0xcb, 0x90, 0xdb, 0xf2, 0xb6, 0x3d, 0xd6, 0x32, 0x33, 0xce, 0x21, 0x0b, 0x1a, 0xcb, 0xf4,
-	0x1e, 0x71, 0xf6, 0x6c, 0x6d, 0x64, 0x43, 0xe6, 0x68, 0xb6, 0xbe, 0x41, 0xf3, 0x27, 0x68, 0x0a,
-	0xf9, 0x4d, 0xc8, 0x5b, 0x69, 0x5e, 0x2a, 0x39, 0x37, 0x28, 0xb9, 0xff, 0x57, 0xc2, 0x83, 0xd2,
-	0x7d, 0x82, 0x77, 0x32, 0x5d, 0x25, 0x76, 0xa6, 0xb8, 0x05, 0x88, 0xbf, 0x23, 0xe2, 0xad, 0x79,
-	0x57, 0x6b, 0x75, 0xa1, 0x13, 0x1a, 0x9d, 0x29, 0x19, 0xcf, 0x9b, 0xa6, 0xc8, 0x53, 0x73, 0x0b,
-	0xac, 0xd5, 0x98, 0x0f, 0x21, 0xa8, 0x78, 0x2d, 0x78, 0xab, 0xa5, 0x28, 0x45, 0xa5, 0xac, 0xdf,
-	0xb8, 0x1c, 0x42, 0xf4, 0x31, 0xc1, 0x4b, 0x51, 0x14, 0x6d, 0xe4, 0x4e, 0x9d, 0xd9, 0xf8, 0xe9,
-	0xe4, 0xea, 0xef, 0xa5, 0x83, 0x30, 0xc3, 0x29, 0x24, 0xe3, 0x1f, 0x88, 0xb8, 0xc0, 0xf4, 0x0e,
-	0x41, 0x27, 0x63, 0x80, 0x19, 0x3a, 0x01, 0x75, 0xe6, 0x58, 0xcc, 0x50, 0x07, 0x74, 0x34, 0x47,
-	0x60, 0x86, 0x8e, 0x40, 0x07, 0xd3, 0x34, 0x66, 0xe8, 0x40, 0x9f, 0x10, 0xff, 0xab, 0x16, 0x32,
-	0x17, 0x6d, 0x84, 0xcd, 0x41, 0x77, 0x87, 0x83, 0xde, 0x68, 0x21, 0x3b, 0xd6, 0xe7, 0xe1, 0xc3,
-	0xdc, 0x6e, 0x0a, 0xe5, 0x30, 0x72, 0x33, 0x5a, 0x7f, 0x18, 0x79, 0xac, 0x09, 0x36, 0xdf, 0xc0,
-	0x12, 0x97, 0x75, 0x59, 0xf2, 0x2a, 0xb3, 0x53, 0xf1, 0xd3, 0x33, 0xc2, 0xa8, 0x92, 0x85, 0x9d,
-	0xc8, 0x28, 0x5b, 0x00, 0xb3, 0xad, 0xed, 0x7f, 0x24, 0xb7, 0xb0, 0x99, 0x17, 0xb2, 0xd6, 0xcd,
-	0xa2, 0x3b, 0x77, 0x1e, 0xb2, 0xe0, 0x93, 0x65, 0x78, 0x29, 0xef, 0x0e, 0x42, 0x5a, 0xd5, 0x90,
-	0x79, 0x47, 0x43, 0xf1, 0x7b, 0x12, 0xce, 0x0b, 0x21, 0x15, 0xd3, 0x85, 0xf8, 0x67, 0x17, 0x94,
-	0xb8, 0x2f, 0x77, 0xd7, 0x9b, 0xfe, 0x6a, 0x7c, 0xde, 0x5d, 0x6f, 0x86, 0x85, 0x3a, 0x17, 0x0b,
-	0x85, 0xdf, 0xbf, 0xe2, 0x0d, 0x5f, 0x25, 0x66, 0x3a, 0x0e, 0xf3, 0xbe, 0x18, 0xfa, 0xe0, 0x99,
-	0x57, 0xfe, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x02, 0xe9, 0x30, 0xf7, 0x03, 0x00,
-	0x00,
+	// 541 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0x4b, 0x8e, 0xd3, 0x4c,
+	0x10, 0xc7, 0xd5, 0xb1, 0x3b, 0x89, 0x2b, 0x9f, 0xf2, 0xa1, 0xd6, 0x08, 0x59, 0x88, 0x45, 0x64,
+	0xb1, 0x08, 0x12, 0x9a, 0x05, 0x9c, 0x20, 0x89, 0x47, 0x28, 0x30, 0x0c, 0xa1, 0x33, 0x11, 0x2b,
+	0x16, 0x4d, 0x52, 0x43, 0x2c, 0x39, 0xb6, 0x69, 0xdb, 0x24, 0xde, 0xb2, 0x85, 0xdb, 0x70, 0x01,
+	0x8e, 0x86, 0xaa, 0xdd, 0x76, 0x2c, 0xf1, 0xd0, 0xec, 0xea, 0x5f, 0x55, 0xae, 0xfe, 0xd5, 0x23,
+	0x81, 0x71, 0x94, 0x14, 0xa8, 0x13, 0x15, 0x5f, 0x66, 0x3a, 0x2d, 0x52, 0x31, 0x6c, 0x74, 0xf0,
+	0x83, 0xc1, 0xe8, 0xea, 0x94, 0xc5, 0xa9, 0x56, 0x45, 0x94, 0x26, 0x62, 0x0c, 0xbd, 0x65, 0xe8,
+	0xb3, 0x09, 0x9b, 0x3a, 0xb2, 0xb7, 0x0c, 0x85, 0x00, 0xf7, 0x46, 0x1d, 0xd0, 0xef, 0x4d, 0xd8,
+	0xd4, 0x93, 0xc6, 0x16, 0x0f, 0xa1, 0xbf, 0xc9, 0x51, 0x2f, 0x43, 0xdf, 0x31, 0x79, 0x56, 0x51,
+	0x6e, 0xa8, 0x0a, 0xe5, 0xbb, 0x75, 0x2e, 0xd9, 0xe2, 0x31, 0x78, 0x0b, 0x8d, 0xaa, 0xc0, 0xdd,
+	0xac, 0xf0, 0xb9, 0x49, 0x3f, 0x3b, 0x28, 0xba, 0xc9, 0x76, 0x36, 0xda, 0xaf, 0xa3, 0xad, 0x43,
+	0xf8, 0x30, 0x08, 0xf1, 0x4e, 0x95, 0x71, 0xe1, 0x0f, 0x26, 0x6c, 0x3a, 0x94, 0x8d, 0x0c, 0x7e,
+	0x32, 0xe8, 0xaf, 0xd3, 0x52, 0x6f, 0xf1, 0x5e, 0xc0, 0x02, 0xdc, 0xdb, 0x2a, 0x43, 0x83, 0xeb,
+	0x49, 0x63, 0x8b, 0x47, 0x30, 0x24, 0xec, 0x84, 0x72, 0x6b, 0xe0, 0x56, 0x53, 0x6c, 0xa5, 0xf2,
+	0xfc, 0x98, 0xea, 0x9d, 0x61, 0xf6, 0x64, 0xab, 0xc5, 0x03, 0x70, 0x36, 0xf2, 0xda, 0xc0, 0x7a,
+	0x92, 0xcc, 0xbf, 0x63, 0x52, 0x9d, 0x5b, 0x8c, 0xf1, 0x93, 0x56, 0x77, 0xfe, 0xb0, 0xae, 0xd3,
+	0xe8, 0xe0, 0x3b, 0xb5, 0x80, 0xfa, 0x0b, 0xea, 0x7b, 0xb5, 0xd0, 0xc5, 0x75, 0xfe, 0x81, 0xeb,
+	0xfe, 0x19, 0x97, 0x9f, 0x71, 0x2f, 0x80, 0xaf, 0xf5, 0x76, 0x19, 0xda, 0x79, 0xd7, 0x22, 0xf8,
+	0xca, 0xa0, 0x7f, 0xad, 0xaa, 0xb4, 0x2c, 0x3a, 0x38, 0x9e, 0xc1, 0x99, 0xc0, 0x68, 0x96, 0x65,
+	0x71, 0xb4, 0x35, 0x17, 0x62, 0xa9, 0xba, 0x2e, 0xca, 0x78, 0x83, 0x2a, 0x2f, 0x35, 0x1e, 0x30,
+	0x29, 0x2c, 0x5f, 0xd7, 0x25, 0x9e, 0x00, 0x5f, 0x60, 0x1c, 0xe7, 0xbe, 0x3b, 0x71, 0xa6, 0xa3,
+	0xe7, 0xe3, 0xcb, 0xf6, 0x20, 0xc9, 0x2d, 0xeb, 0x60, 0xf0, 0x8d, 0x81, 0x4b, 0x96, 0xf8, 0x0f,
+	0xd8, 0xc9, 0x10, 0x70, 0xc9, 0x4e, 0xa4, 0x2a, 0xf3, 0x2c, 0x97, 0xac, 0x22, 0x75, 0x34, 0x4f,
+	0x70, 0xc9, 0x8e, 0xa4, 0xf6, 0xa6, 0x69, 0x2e, 0xd9, 0x5e, 0x3c, 0x85, 0xc1, 0xe7, 0x12, 0x75,
+	0x84, 0xb9, 0xcf, 0xcd, 0x43, 0xff, 0x9f, 0x1f, 0x7a, 0x57, 0xa2, 0xae, 0x64, 0x13, 0xa7, 0x0f,
+	0x23, 0xbb, 0x45, 0x16, 0xd1, 0xc8, 0xcd, 0x68, 0x07, 0xf5, 0xc8, 0xc9, 0x0e, 0x4a, 0xe0, 0xe6,
+	0x1b, 0x5a, 0xf0, 0x22, 0x3d, 0x1c, 0x54, 0xb2, 0xb3, 0x53, 0x69, 0x24, 0x8d, 0x2a, 0x9c, 0xdb,
+	0x89, 0xf4, 0xc2, 0x39, 0x69, 0xb9, 0xb2, 0xfd, 0xf7, 0xe4, 0x8a, 0x36, 0xf3, 0x52, 0xa7, 0x65,
+	0x36, 0xaf, 0xea, 0xce, 0x3d, 0xd9, 0x6a, 0xfa, 0x15, 0xbd, 0xdf, 0xa3, 0xb6, 0xa8, 0x9e, 0xb4,
+	0x2a, 0xf8, 0x00, 0xde, 0x2c, 0x46, 0x5d, 0xc8, 0x32, 0xc6, 0xdf, 0x76, 0x21, 0xc0, 0x7d, 0xb5,
+	0x7e, 0x7b, 0xd3, 0x9c, 0x06, 0xd9, 0xe7, 0x85, 0x3a, 0x9d, 0x85, 0x52, 0xf9, 0xd7, 0x2a, 0x53,
+	0xcb, 0xd0, 0x4c, 0xc7, 0x91, 0x56, 0x05, 0xcf, 0xc0, 0xa5, 0xc3, 0xe9, 0x54, 0x76, 0x4d, 0xe5,
+	0x0b, 0xe0, 0x57, 0x07, 0x15, 0xc5, 0xb6, 0x74, 0x2d, 0x3e, 0xf6, 0xcd, 0xff, 0xc5, 0x8b, 0x5f,
+	0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x2c, 0x30, 0x90, 0x41, 0x04, 0x00, 0x00,
 }

View File

@@ -19,6 +19,7 @@ message Source {
 	string Password = 5;
 	string URL = 6;      // URL are the connections to the source
 	bool Default = 7;    // Flags an exploration as the default.
+	string Telegraf = 8; // Telegraf is the database telegraf writes to. By default it is "telegraf".
 }
 
 message Server {
@@ -61,3 +62,8 @@ message AlertRule {
 	int64 SrcID = 3;  // SrcID is the id of the source this alert is associated with
 	int64 KapaID = 4; // KapaID is the id of the kapacitor this alert is associated with
 }
+
+message User {
+	uint64 ID = 1;    // ID is the unique ID of this user
+	string Email = 2; // Email is the email address of the user
+}
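
One consequence of proto3 worth noting for the new Telegraf field: proto3 scalars carry no presence bit, so a source saved without it decodes as the empty string, and the "telegraf" fallback in the comment has to be applied by the reader. A sketch using the Marshal/UnmarshalSource helpers from the diff above:

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/chronograf"
	"github.com/influxdata/chronograf/bolt/internal"
)

func main() {
	// A source saved without Telegraf set...
	buf, err := internal.MarshalSource(chronograf.Source{URL: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}

	var s chronograf.Source
	if err := internal.UnmarshalSource(buf, &s); err != nil {
		log.Fatal(err)
	}
	// ...round-trips as "", so callers must substitute "telegraf" themselves.
	fmt.Printf("%q\n", s.Telegraf) // ""
}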

View File

@@ -39,6 +39,7 @@ func TestMarshalSource(t *testing.T) {
 		Password: "1 point twenty-one g1g@w@tts",
 		URL:      "http://twin-pines.mall.io:8086",
 		Default:  true,
+		Telegraf: "telegraf",
 	}
 	var vv chronograf.Source
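
The rest of the test is cut off in this view; a typical round-trip assertion for a marshal test of this shape (a reconstruction sketch assuming `reflect` is imported, not the verbatim file) would continue:

	buf, err := MarshalSource(v)
	if err != nil {
		t.Fatal("error marshaling source:", err)
	}
	if err := UnmarshalSource(buf, &vv); err != nil {
		t.Fatal("error unmarshaling source:", err)
	}
	if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}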

bolt/users.go (new file, 129 lines)
View File

@@ -0,0 +1,129 @@
package bolt

import (
	"context"

	"github.com/boltdb/bolt"
	"github.com/influxdata/chronograf"
	"github.com/influxdata/chronograf/bolt/internal"
)

// Ensure UsersStore implements chronograf.UsersStore.
var _ chronograf.UsersStore = &UsersStore{}

// UsersBucket is the bolt bucket in which users are stored.
var UsersBucket = []byte("Users")

// UsersStore is a bolt-backed implementation of chronograf.UsersStore.
type UsersStore struct {
	client *Client
}

// FindByEmail searches the UsersStore for the user with the given email.
func (s *UsersStore) FindByEmail(ctx context.Context, email string) (*chronograf.User, error) {
	var user chronograf.User
	err := s.client.db.View(func(tx *bolt.Tx) error {
		err := tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error {
			var u chronograf.User
			if err := internal.UnmarshalUser(v, &u); err != nil {
				return err
			} else if u.Email != email {
				return nil
			}
			user.Email = u.Email
			user.ID = u.ID
			return nil
		})
		if err != nil {
			return err
		}
		if user.ID == 0 {
			return chronograf.ErrUserNotFound
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &user, nil
}

// Add creates a new user in the UsersStore.
func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(UsersBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		u.ID = chronograf.UserID(seq)
		if v, err := internal.MarshalUser(u); err != nil {
			return err
		} else if err := b.Put(itob(int(u.ID)), v); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return u, nil
}

// Delete removes the user from the UsersStore.
func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error {
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		if err := tx.Bucket(UsersBucket).Delete(itob(int(u.ID))); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}

// Get retrieves a user by ID.
func (s *UsersStore) Get(ctx context.Context, id chronograf.UserID) (*chronograf.User, error) {
	var u chronograf.User
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		if v := tx.Bucket(UsersBucket).Get(itob(int(id))); v == nil {
			return chronograf.ErrUserNotFound
		} else if err := internal.UnmarshalUser(v, &u); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return &u, nil
}

// Update the user's email in the UsersStore.
func (s *UsersStore) Update(ctx context.Context, usr *chronograf.User) error {
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		// Retrieve an existing user with the same ID.
		var u chronograf.User
		b := tx.Bucket(UsersBucket)
		if v := b.Get(itob(int(usr.ID))); v == nil {
			return chronograf.ErrUserNotFound
		} else if err := internal.UnmarshalUser(v, &u); err != nil {
			return err
		}
		u.Email = usr.Email
		if v, err := internal.MarshalUser(&u); err != nil {
			return err
		} else if err := b.Put(itob(int(u.ID)), v); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
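
A minimal usage sketch of the new store (itob is the package's existing int-to-bytes key helper; Path and Close on the client are assumed from the rest of the package, and the path here is illustrative). Note that FindByEmail's ID == 0 sentinel works because NextSequence starts IDs at 1, and that the email scan is linear over the bucket:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/influxdata/chronograf"
	"github.com/influxdata/chronograf/bolt"
)

func main() {
	c := bolt.NewClient()
	c.Path = "/tmp/chronograf-demo.db"
	if err := c.Open(); err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	ctx := context.Background()

	// Add assigns the ID from the bucket's NextSequence counter.
	u, err := c.UsersStore.Add(ctx, &chronograf.User{Email: "doc@example.com"})
	if err != nil {
		log.Fatal(err)
	}

	// Update only touches the email; the ID is the lookup key.
	u.Email = "doc.brown@example.com"
	if err := c.UsersStore.Update(ctx, u); err != nil {
		log.Fatal(err)
	}

	found, err := c.UsersStore.FindByEmail(ctx, "doc.brown@example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(found.ID == u.ID) // true

	if err := c.UsersStore.Delete(ctx, u); err != nil {
		log.Fatal(err)
	}
}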

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"BytesPerSec\")) AS \"bytes_per_sec\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"ReqPerSec\")) AS \"req_per_sec\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -51,8 +47,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"TotalAccesses\")) AS \"tot_access\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT count(\"check_id\") as \"Number Critical\" FROM consul_health_checks",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"service_name\""
           ],
@@ -34,8 +32,6 @@
       "queries": [
         {
           "query": "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"service_name\""
           ],

View File

@@ -12,9 +12,7 @@
       "name": "CPU Usage",
       "queries": [
         {
-          "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"telegraf\"..\"cpu\"",
-          "db": "telegraf",
-          "rp": "",
+          "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"",
           "groupbys": [],
           "wheres": []
         }

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM disk",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"path\""
           ],

View File

@@ -2,20 +2,23 @@
   "id": "0e980b97-c162-487b-a815-3f955df6243f",
   "measurement": "docker",
   "app": "docker",
-  "cells": [{
+  "cells": [
+    {
       "x": 0,
       "y": 0,
       "w": 4,
       "h": 4,
       "i": "4c79cefb-5152-410c-9b88-74f9bff7ef22",
       "name": "Docker - Container CPU",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \"docker_container_cpu\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": ["\"container_name\""],
+          "groupbys": [
+            "\"container_name\""
+          ],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -24,13 +27,15 @@
       "h": 4,
       "i": "4c79cefb-5152-410c-9b88-74f9bff7ef00",
       "name": "Docker - Container Memory",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(\"usage\") AS \"usage\" FROM \"docker_container_mem\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": ["\"container_name\""],
+          "groupbys": [
+            "\"container_name\""
+          ],
           "wheres": []
-      }]
-  }]
+        }
+      ]
+    }
+  ]
 }

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(search_query_total)) as searches_per_min, non_negative_derivative(mean(search_scroll_total)) as scrolls_per_min, non_negative_derivative(mean(search_fetch_total)) as fetches_per_min, non_negative_derivative(mean(search_suggest_total)) as suggests_per_min from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -30,8 +28,6 @@
       "queries": [
         {
           "query": "select mean(current_open) from elasticsearch_http",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -47,8 +43,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(search_query_time_in_millis)) as mean, non_negative_derivative(median(search_query_time_in_millis)) as median, non_negative_derivative(percentile(search_query_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -64,8 +58,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(search_fetch_time_in_millis)) as mean, non_negative_derivative(median(search_fetch_time_in_millis)) as median, non_negative_derivative(percentile(search_fetch_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -81,8 +73,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(search_suggest_time_in_millis)) as mean, non_negative_derivative(median(search_suggest_time_in_millis)) as median, non_negative_derivative(percentile(search_suggest_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -98,8 +88,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(search_scroll_time_in_millis)) as mean, non_negative_derivative(median(search_scroll_time_in_millis)) as median, non_negative_derivative(percentile(search_scroll_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -115,8 +103,6 @@
       "queries": [
         {
           "query": "select non_negative_derivative(mean(indexing_index_time_in_millis)) as mean from elasticsearch_indices",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -132,8 +118,6 @@
       "queries": [
         {
           "query": "select mean(gc_collectors_old_collection_count) as old_count, mean(gc_collectors_young_collection_count) as young_count from elasticsearch_jvm",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -149,8 +133,6 @@
       "queries": [
        {
           "query": "select non_negative_derivative(mean(gc_collectors_old_collection_time_in_millis)) as mean_old_time, non_negative_derivative(mean(gc_collectors_young_collection_time_in_millis)) as mean_young_time from elasticsearch_jvm",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -166,8 +148,6 @@
       "queries": [
         {
           "query": "select mean(mem_heap_used_percent) from elasticsearch_jvm",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "select mean(\"active_servers\") AS active_servers, mean(\"backup_servers\") AS backup_servers FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -30,8 +28,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(last(\"http_response.2xx\"), 1s) AS \"2xx\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -47,8 +43,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(last(\"http_response.4xx\"), 1s) AS \"4xx\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -64,8 +58,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(last(\"http_response.5xx\"), 1s) AS \"5xx\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -81,8 +73,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"req_rate\") AS \"requests_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -98,8 +88,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"rate\")) AS \"sessions_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -115,8 +103,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"scur\")) / non_negative_derivative(max(\"slim\")) * 100 AS \"session_usage_percent\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -132,8 +118,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"dreq\")) AS \"denials_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -149,8 +133,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"ereq\")) AS \"errors_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -166,15 +148,11 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"bin\")) AS \"bytes_in_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         },
         {
           "query": "SELECT non_negative_derivative(max(\"bout\")) AS \"bytes_out_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -190,8 +168,6 @@
       "queries": [
         {
           "query": "SELECT max(\"rtime\") AS \"response_time\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -207,8 +183,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"econ\")) AS \"errors_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -224,8 +198,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"qcur\")) AS \"queued_per_second\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -241,8 +213,6 @@
       "queries": [
         {
           "query": "SELECT max(\"qtime\") AS \"queue_time\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
@@ -258,8 +228,6 @@
       "queries": [
         {
           "query": "SELECT max(\"eresp\") AS \"error_response_rate\" FROM haproxy",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }

View File

@@ -13,18 +13,12 @@
       "queries": [
         {
           "query": "SELECT max(\"numMeasurements\") AS \"measurements\" FROM \"influxdb_database\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         },
         {
           "query": "SELECT max(\"numSeries\") AS \"series\" FROM \"influxdb_database\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]

View File

@@ -13,10 +13,7 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"writeReq\"), 1s) AS \"http_requests\" FROM \"influxdb_httpd\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]
@@ -31,10 +28,7 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"queryReq\"), 1s) AS \"query_requests\" FROM \"influxdb_httpd\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]
@@ -49,18 +43,12 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"clientError\"), 1s) AS \"client_errors\" FROM \"influxdb_httpd\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         },
         {
           "query": "SELECT non_negative_derivative(max(\"authFail\"), 1s) AS \"auth_fail\" FROM \"influxdb_httpd\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]

View File

@@ -13,18 +13,12 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"queryDurationNs\"), 1s) AS \"duration\" FROM \"influxdb_queryExecutor\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         },
         {
           "query": "SELECT non_negative_derivative(max(\"queriesExecuted\"), 1s) AS \"queries_executed\" FROM \"influxdb_queryExecutor\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]

View File

@@ -13,10 +13,7 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"pointReq\"), 1s) AS \"points_written\" FROM \"influxdb_write\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]
@@ -31,18 +28,12 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"writeError\"), 1s) AS \"shard_write_error\" FROM \"influxdb_write\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         },
         {
           "query": "SELECT non_negative_derivative(max(\"serveError\"), 1s) AS \"http_error\" FROM \"influxdb_httpd\"",
-          "db": "telegraf",
-          "rp": "",
-          "groupbys": [
-          ],
+          "groupbys": [],
           "wheres": []
         }
       ]

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_node",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"node_name\""
           ],
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_node",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"node_name\""
           ],

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_pod_container",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"pod_name\""
           ],
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_pod_container",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"pod_name\""
           ],

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"tx_bytes\")) AS \"tx_bytes_per_second\" FROM kubernetes_pod_network",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"pod_name\"",
             "\"host\""
@@ -33,8 +31,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"rx_bytes\")) AS \"rx_bytes_per_second\" FROM kubernetes_pod_network",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"pod_name\"",
             "\"host\""

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_system_container",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": [
             "\"container_name\" = 'kubelet'"
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_system_container",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": [
             "\"container_name\" = 'kubelet'"

View File

@@ -12,9 +12,7 @@
       "name": "System Load",
       "queries": [
         {
-          "query": "SELECT mean(\"load1\") AS \"load\" FROM \"telegraf\"..\"system\"",
-          "db": "telegraf",
-          "rp": "",
+          "query": "SELECT mean(\"load1\") AS \"load\" FROM \"system\"",
           "groupbys": [],
           "wheres": []
         }

View File

@@ -12,9 +12,7 @@
       "name": "System - Memory Bytes Used",
       "queries": [
         {
-          "query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"telegraf\"..\"mem\"",
-          "db": "telegraf",
-          "rp": "",
+          "query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"mem\"",
           "groupbys": [],
           "wheres": []
         }

View File

@@ -2,20 +2,21 @@
   "id": "f280c8c7-0530-425c-b281-788d8ded7676",
   "measurement": "memcached",
   "app": "memcached",
-  "cells": [{
+  "cells": [
+    {
       "x": 0,
       "y": 0,
       "w": 4,
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490",
       "name": "Memcached - Current Connections",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -24,13 +25,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400",
       "name": "Memcached - Get Hits/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"get_hits\"), 1s) AS \"get_hits\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -39,13 +40,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405",
       "name": "Memcached - Get Misses/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"get_misses\"), 1s) AS \"get_misses\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -54,13 +55,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413",
       "name": "Memcached - Delete Hits/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"delete_hits\"), 1s) AS \"delete_hits\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -69,13 +70,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412",
       "name": "Memcached - Delete Misses/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"delete_misses\"), 1s) AS \"delete_misses\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -84,13 +85,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411",
       "name": "Memcached - Incr Hits/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"incr_hits\"), 1s) AS \"incr_hits\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -99,13 +100,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510",
       "name": "Memcached - Incr Misses/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"incr_misses\"), 1s) AS \"incr_misses\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -114,13 +115,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402",
       "name": "Memcached - Current Items",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT max(\"curr_items\") AS \"current_items\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -129,13 +130,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403",
       "name": "Memcached - Total Items",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT max(\"total_items\") AS \"total_items\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -144,13 +145,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404",
       "name": "Memcached - Bytes Stored",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT max(\"bytes\") AS \"bytes\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -159,13 +160,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406",
       "name": "Memcached - Bytes Read/Sec",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"bytes_read\"), 1s) AS \"bytes_read\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -174,13 +175,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407",
       "name": "Memcached - Bytes Written/Sec",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"bytes_written\"), 1s) AS \"bytes_written\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -189,13 +190,13 @@
       "h": 4,
       "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401",
       "name": "Memcached - Evictions/10 Seconds",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM memcached",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     }
   ]
 }

View File

@@ -2,20 +2,21 @@
   "id": "921298ad-0cdd-44f4-839b-10c319e7fcc7",
   "measurement": "mongodb",
   "app": "mongodb",
-  "cells": [{
+  "cells": [
+    {
       "x": 0,
       "y": 0,
       "w": 4,
       "h": 4,
       "i": "b2631fd5-7d32-4a31-9edf-98362fd3626e",
       "name": "MongoDB Read/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -24,13 +25,13 @@
       "h": 4,
       "i": "9362e390-951b-4dba-adec-40c261e37604",
       "name": "MongoDB Writes/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -39,15 +40,14 @@
       "h": 4,
       "i": "7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4",
       "name": "MongoDB Active Connections",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(open_connections) AS open_connections FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
       "y": 0,
@@ -55,13 +55,13 @@
       "h": 4,
       "i": "ea5ae388-9ca3-42f9-835f-cc9b265705be",
       "name": "MongoDB Reads/Writes Waiting in Queue",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
@@ -70,15 +70,14 @@
       "h": 4,
       "i": "631dcbba-c997-4fd7-b640-754a1b36026c",
       "name": "MongoDB Network Bytes/Second",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
-      }]
+        }
+      ]
     },
     {
       "x": 0,
       "y": 0,
@@ -86,16 +85,14 @@
       "h": 4,
       "i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2",
       "name": "MongoDB Page Faults",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
       ]
     },
     {
       "x": 0,
       "y": 0,
@@ -103,15 +100,13 @@
       "h": 4,
       "i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8",
       "name": "MongoDB Memory Usage (MB)",
-      "queries": [{
+      "queries": [
+        {
           "query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM mongodb",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [],
           "wheres": []
         }
       ]
     }
   ]
 }

View File

@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"commands_select\")) AS selects_per_second FROM mysql",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"commands_insert\")) AS inserts_per_second, non_negative_derivative(max(\"commands_update\")) AS updates_per_second, non_negative_derivative(max(\"commands_delete\")) AS deletes_per_second FROM mysql",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -51,8 +47,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"threads_connected\")) AS cxn_per_second, non_negative_derivative(max(\"threads_running\")) AS threads_running_per_second FROM mysql",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -70,8 +64,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"connection_errors_max_connections\")) AS cxn_errors_per_second, non_negative_derivative(max(\"connection_errors_internal\")) AS internal_cxn_errors_per_second, non_negative_derivative(max(\"aborted_connects\")) AS cxn_aborted_per_second FROM mysql",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],

View File

@ -1,7 +1,7 @@
{ {
"id": "ff41d044-f61a-4522-8de7-9e39e3a1b5de", "id": "ff41d044-f61a-4522-8de7-9e39e3a1b5de",
"measurement": "netstat", "measurement": "netstat",
"app": "network", "app": "system",
"cells": [ "cells": [
{ {
"x": 0, "x": 0,
@ -13,15 +13,11 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"tcp_established\") AS \"tcp_established\" FROM netstat", "query": "SELECT mean(\"tcp_established\") AS \"tcp_established\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
}, },
{ {
"query": "SELECT mean(\"udp_socket\") AS \"udp_socket\" FROM netstat", "query": "SELECT mean(\"udp_socket\") AS \"udp_socket\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }
@ -37,15 +33,11 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"tcp_established\")) AS \"tcp_established\" FROM netstat", "query": "SELECT non_negative_derivative(max(\"tcp_established\")) AS \"tcp_established\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
}, },
{ {
"query": "SELECT non_negative_derivative(max(\"udp_socket\")) AS \"udp_socket\" FROM netstat", "query": "SELECT non_negative_derivative(max(\"udp_socket\")) AS \"udp_socket\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -50,8 +50,6 @@ cat > $APP_FILE << EOF
"name": "User facing cell Name", "name": "User facing cell Name",
"queries": [{ "queries": [{
"query": "select mean(\"used_percent from\") from disk", "query": "select mean(\"used_percent from\") from disk",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
}] }]

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"accepts\"), 1s) AS \"accepts\", non_negative_derivative(max(\"handled\"), 1s) AS \"handled\", non_negative_derivative(max(\"active\"), 1s) AS \"active\" FROM nginx", "query": "SELECT non_negative_derivative(max(\"accepts\"), 1s) AS \"accepts\", non_negative_derivative(max(\"handled\"), 1s) AS \"handled\", non_negative_derivative(max(\"active\"), 1s) AS \"active\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],
@ -32,8 +30,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"accepts\")) - non_negative_derivative(max(\"handled\")) FROM nginx", "query": "SELECT non_negative_derivative(max(\"accepts\")) - non_negative_derivative(max(\"handled\")) FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],
@ -51,8 +47,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"requests\"), 1s) AS \"requests\" FROM nginx", "query": "SELECT non_negative_derivative(max(\"requests\"), 1s) AS \"requests\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],
@ -70,8 +64,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"waiting\"), 1s) AS \"waiting\", non_negative_derivative(max(\"reading\"), 1s) AS \"reading\", non_negative_derivative(max(\"writing\"), 1s) AS \"writing\" FROM nginx", "query": "SELECT non_negative_derivative(max(\"waiting\"), 1s) AS \"waiting\", non_negative_derivative(max(\"reading\"), 1s) AS \"reading\", non_negative_derivative(max(\"writing\"), 1s) AS \"writing\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"client_count\") AS \"client_count\" FROM nsq_channel", "query": "SELECT mean(\"client_count\") AS \"client_count\" FROM nsq_channel",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"", "\"topic\"",
"\"channel\"" "\"channel\""
@ -33,8 +31,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"message_count\") AS \"message_count\" FROM nsq_channel", "query": "SELECT mean(\"message_count\") AS \"message_count\" FROM nsq_channel",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"", "\"topic\"",
"\"channel\"" "\"channel\""

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"topic_count\") AS \"topic_count\" FROM nsq_server", "query": "SELECT mean(\"topic_count\") AS \"topic_count\" FROM nsq_server",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }
@ -30,8 +28,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"server_count\") AS \"server_count\" FROM nsq_server", "query": "SELECT mean(\"server_count\") AS \"server_count\" FROM nsq_server",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"depth\") AS \"depth\" FROM nsq_topic", "query": "SELECT mean(\"depth\") AS \"depth\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"" "\"topic\""
], ],
@ -32,8 +30,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"backend_depth\") AS \"backend_depth\" FROM nsq_topic", "query": "SELECT mean(\"backend_depth\") AS \"backend_depth\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"" "\"topic\""
], ],
@ -51,8 +47,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"message_count\")) AS \"messages_per_second\" FROM nsq_topic", "query": "SELECT non_negative_derivative(max(\"message_count\")) AS \"messages_per_second\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"", "\"topic\"",
"\"host\"" "\"host\""
@ -71,8 +65,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"message_count\")) - non_negative_derivative(max(\"depth\")) AS \"messages_per_second\" FROM nsq_topic", "query": "SELECT non_negative_derivative(max(\"message_count\")) - non_negative_derivative(max(\"depth\")) AS \"messages_per_second\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"topic\"", "\"topic\"",
"\"host\"" "\"host\""

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "select max(\"percent_packet_loss\") as \"packet_loss\" from ping", "query": "select max(\"percent_packet_loss\") as \"packet_loss\" from ping",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],
@ -32,8 +30,6 @@
"queries": [ "queries": [
{ {
"query": "select mean(\"average_response_ms\") as \"average\", mean(\"minimum_response_ms\") as \"min\", mean(\"maximum_response_ms\") as \"max\" from ping", "query": "select mean(\"average_response_ms\") as \"average\", mean(\"minimum_response_ms\") as \"min\", mean(\"maximum_response_ms\") as \"max\" from ping",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"\"server\"" "\"server\""
], ],

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(mean(\"tup_fetched\")) AS \"fetched\", non_negative_derivative(mean(\"tup_returned\")) AS \"returned\", non_negative_derivative(mean(\"tup_inserted\")) AS \"inserted\", non_negative_derivative(mean(\"tup_updated\")) AS \"updated\" FROM postgresql", "query": "SELECT non_negative_derivative(mean(\"tup_fetched\")) AS \"fetched\", non_negative_derivative(mean(\"tup_returned\")) AS \"returned\", non_negative_derivative(mean(\"tup_inserted\")) AS \"inserted\", non_negative_derivative(mean(\"tup_updated\")) AS \"updated\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"db" "db"
], ],
@ -32,8 +30,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(mean(\"xact_commit\")) AS \"xact_commit\" FROM postgresql", "query": "SELECT non_negative_derivative(mean(\"xact_commit\")) AS \"xact_commit\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [ "groupbys": [
"db" "db"
], ],
@ -51,10 +47,7 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"buffers_alloc\") AS \"buffers_allocated\", mean(\"buffers_backend\") AS \"buffers_backend\", mean(\"buffers_backend_fsync\") AS \"buffers_backend_fsync\", mean(\"buffers_checkpoint\") AS \"buffers_checkpoint\", mean(\"buffers_clean\") AS \"buffers_clean\" FROM postgresql", "query": "SELECT mean(\"buffers_alloc\") AS \"buffers_allocated\", mean(\"buffers_backend\") AS \"buffers_backend\", mean(\"buffers_backend_fsync\") AS \"buffers_backend_fsync\", mean(\"buffers_checkpoint\") AS \"buffers_checkpoint\", mean(\"buffers_clean\") AS \"buffers_clean\" FROM postgresql",
"db": "telegraf", "groupbys": [],
"rp": "",
"groupbys": [
],
"wheres": [] "wheres": []
} }
] ]
@ -69,10 +62,7 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"conflicts\") AS \"conflicts\", mean(\"deadlocks\") AS \"deadlocks\" FROM postgresql", "query": "SELECT mean(\"conflicts\") AS \"conflicts\", mean(\"deadlocks\") AS \"deadlocks\" FROM postgresql",
"db": "telegraf", "groupbys": [],
"rp": "",
"groupbys": [
],
"wheres": [] "wheres": []
} }
] ]

View File

@ -1,7 +1,7 @@
{ {
"id": "ffad2dff-d263-412e-806a-1e836af87942", "id": "ffad2dff-d263-412e-806a-1e836af87942",
"measurement": "processes", "measurement": "processes",
"app": "processes", "app": "system",
"cells": [ "cells": [
{ {
"x": 0, "x": 0,
@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"total\") AS \"total\" FROM processes", "query": "SELECT mean(\"total\") AS \"total\" FROM processes",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"clients\") AS \"clients\" FROM redis", "query": "SELECT mean(\"clients\") AS \"clients\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": [] "groupbys": []
} }
] ]
@ -29,8 +27,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"blocked_clients\") AS \"blocked_clients\" FROM redis", "query": "SELECT mean(\"blocked_clients\") AS \"blocked_clients\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": [] "groupbys": []
} }
] ]
@ -45,8 +41,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"used_cpu_user\")) AS \"used_cpu_per_second\" FROM redis", "query": "SELECT non_negative_derivative(max(\"used_cpu_user\")) AS \"used_cpu_per_second\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": [] "groupbys": []
} }
] ]
@ -61,8 +55,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT non_negative_derivative(max(\"used_memory\")) AS \"used_memory_per_second\" FROM redis", "query": "SELECT non_negative_derivative(max(\"used_memory\")) AS \"used_memory_per_second\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": [] "groupbys": []
} }
] ]

View File

@ -13,9 +13,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"memory_total\") as memory_total_bytes FROM riak", "query": "SELECT max(\"memory_total\") as memory_total_bytes FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -30,9 +30,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"node_get_fsm_objsize_median\") AS \"median\", max(\"node_get_fsm_objsize_100\") AS \"100th-percentile\", max(\"node_get_fsm_objsize_99\") AS \"99th-percentile\", max(\"node_get_fsm_objsize_mean\") AS \"mean\", max(\"node_get_fsm_objsize_95\") AS \"95th-percentile\" FROM riak", "query": "SELECT max(\"node_get_fsm_objsize_median\") AS \"median\", max(\"node_get_fsm_objsize_100\") AS \"100th-percentile\", max(\"node_get_fsm_objsize_99\") AS \"99th-percentile\", max(\"node_get_fsm_objsize_mean\") AS \"mean\", max(\"node_get_fsm_objsize_95\") AS \"95th-percentile\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -47,9 +47,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"node_get_fsm_siblings_median\") AS \"median\", max(\"node_get_fsm_siblings_mean\") AS \"mean\", max(\"node_get_fsm_siblings_99\") AS \"99th-percentile\", max(\"node_get_fsm_siblings_95\") AS \"95h-percentile\", max(\"node_get_fsm_siblings_100\") AS \"100th-percentile\" FROM riak", "query": "SELECT max(\"node_get_fsm_siblings_median\") AS \"median\", max(\"node_get_fsm_siblings_mean\") AS \"mean\", max(\"node_get_fsm_siblings_99\") AS \"99th-percentile\", max(\"node_get_fsm_siblings_95\") AS \"95h-percentile\", max(\"node_get_fsm_siblings_100\") AS \"100th-percentile\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -64,9 +64,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"node_put_fsm_time_median\") / 1000 AS \"median_put_milliseconds\", max(\"node_get_fsm_time_median\") / 1000 AS \"median_get_milliseconds\" FROM riak", "query": "SELECT max(\"node_put_fsm_time_median\") / 1000 AS \"median_put_milliseconds\", max(\"node_get_fsm_time_median\") / 1000 AS \"median_get_milliseconds\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -81,9 +81,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"node_puts\") AS \"puts_per_minute\", max(\"node_gets\") AS \"gets_per_minute\" FROM riak", "query": "SELECT max(\"node_puts\") AS \"puts_per_minute\", max(\"node_gets\") AS \"gets_per_minute\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -98,9 +98,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"pbc_active\") AS \"active_protobuf_connections\" FROM riak", "query": "SELECT max(\"pbc_active\") AS \"active_protobuf_connections\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]
@ -115,9 +115,9 @@
"queries": [ "queries": [
{ {
"query": "SELECT max(\"read_repairs\") AS \"read_repairs_per_minute\" FROM riak", "query": "SELECT max(\"read_repairs\") AS \"read_repairs_per_minute\" FROM riak",
"db": "telegraf", "groupbys": [
"rp": "", "\"nodename\""
"groupbys": ["\"nodename\""], ],
"wheres": [] "wheres": []
} }
] ]

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "select non_negative_derivative(mean(cache_hit)) as hits, non_negative_derivative(mean(cache_miss)) as misses from varnish", "query": "select non_negative_derivative(mean(cache_hit)) as hits, non_negative_derivative(mean(cache_miss)) as misses from varnish",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Percent_Processor_Time\") AS \"percent_processor_time\" FROM win_cpu", "query": "SELECT mean(\"Percent_Processor_Time\") AS \"percent_processor_time\" FROM win_cpu",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Available_Bytes\") AS \"available_bytes\" FROM win_mem", "query": "SELECT mean(\"Available_Bytes\") AS \"available_bytes\" FROM win_mem",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Bytes_Sent_persec\") AS \"bytes_sent\" FROM win_net", "query": "SELECT mean(\"Bytes_Sent_persec\") AS \"bytes_sent\" FROM win_net",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }
@ -30,8 +28,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Bytes_Received_persec\") AS \"bytes_received\" FROM win_net", "query": "SELECT mean(\"Bytes_Received_persec\") AS \"bytes_received\" FROM win_net",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,8 +13,6 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM win_system", "query": "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM win_system",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,22 +13,16 @@
"queries": [ "queries": [
{ {
"query": "SELECT mean(\"Get_Requests_persec\") AS \"gets\" FROM win_websvc", "query": "SELECT mean(\"Get_Requests_persec\") AS \"gets\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
}, },
{ {
"query": "SELECT mean(\"Post_Requests_persec\") AS \"posts\" FROM win_websvc", "query": "SELECT mean(\"Post_Requests_persec\") AS \"posts\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
}, },
{ {
"query": "SELECT mean(\"Current_Connections\") AS \"connections\" FROM win_websvc", "query": "SELECT mean(\"Current_Connections\") AS \"connections\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [], "groupbys": [],
"wheres": [] "wheres": []
} }

View File

@ -13,6 +13,7 @@ const (
ErrSourceNotFound = Error("source not found") ErrSourceNotFound = Error("source not found")
ErrServerNotFound = Error("server not found") ErrServerNotFound = Error("server not found")
ErrLayoutNotFound = Error("layout not found") ErrLayoutNotFound = Error("layout not found")
ErrUserNotFound = Error("user not found")
ErrLayoutInvalid = Error("layout is invalid") ErrLayoutInvalid = Error("layout is invalid")
ErrAlertNotFound = Error("alert not found") ErrAlertNotFound = Error("alert not found")
ErrAuthentication = Error("user not authenticated") ErrAuthentication = Error("user not authenticated")
@ -55,8 +56,8 @@ type TimeSeries interface {
// Query retrieves a Response from a TimeSeries. // Query retrieves a Response from a TimeSeries.
type Query struct { type Query struct {
Command string `json:"query"` // Command is the query itself Command string `json:"query"` // Command is the query itself
DB string `json:"db"` // DB is optional and if empty will not be used. DB string `json:"db,omitempty"` // DB is optional and if empty will not be used.
RP string `json:"rp"` // RP is a retention policy and optional; if empty will not be used. RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used.
Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes
GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags
} }
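The `omitempty` tags are what let the layout files above drop their boilerplate `"db": "telegraf"` and `"rp": ""` entries. A minimal, self-contained sketch of the serialization effect (the sample query string is borrowed from the win_system layout above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Query mirrors the struct in the diff above.
type Query struct {
	Command  string   `json:"query"`
	DB       string   `json:"db,omitempty"`
	RP       string   `json:"rp,omitempty"`
	Wheres   []string `json:"wheres"`
	GroupBys []string `json:"groupbys"`
}

func main() {
	q := Query{
		Command:  "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM win_system",
		Wheres:   []string{},
		GroupBys: []string{},
	}
	b, _ := json.Marshal(q)
	// With DB and RP unset, neither "db" nor "rp" appears in the output:
	// {"query":"...","wheres":[],"groupbys":[]}
	fmt.Println(string(b))
}
```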
@ -75,6 +76,7 @@ type Source struct {
Password string `json:"password,omitempty"` // Password is in CLEARTEXT Password string `json:"password,omitempty"` // Password is in CLEARTEXT
URL string `json:"url"` // URL are the connections to the source URL string `json:"url"` // URL are the connections to the source
Default bool `json:"default"` // Default specifies the default source for the application Default bool `json:"default"` // Default specifies the default source for the application
Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf"
} }
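The struct comment above documents a `"telegraf"` default for the new field; a hypothetical helper (not part of this commit) that applies that fallback when reading a source:

```go
// telegrafDB is a hypothetical convenience, not in the diff: return the
// configured Telegraf database, falling back to the documented default.
func telegrafDB(s chronograf.Source) string {
	if s.Telegraf == "" {
		return "telegraf"
	}
	return s.Telegraf
}
```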
// SourcesStore stores connection information for a `TimeSeries` // SourcesStore stores connection information for a `TimeSeries`
@ -195,23 +197,22 @@ type UserID int
// User represents an authenticated user. // User represents an authenticated user.
type User struct { type User struct {
ID UserID ID UserID `json:"id"`
Name string Email string `json:"email"`
} }
// AuthStore is the Storage and retrieval of authentication information // UsersStore is the Storage and retrieval of authentication information
type AuthStore struct { type UsersStore interface {
// User management for the AuthStore // Create a new User in the UsersStore
Users interface { Add(context.Context, *User) (*User, error)
// Create a new User in the AuthStore // Delete the User from the UsersStore
Add(context.Context, User) error Delete(context.Context, *User) error
// Delete the User from the AuthStore // Get retrieves a user if `ID` exists.
Delete(context.Context, User) error Get(ctx context.Context, ID UserID) (*User, error)
// Retrieve a user if `ID` exists.
Get(ctx context.Context, ID int) error
// Update the user's permissions or roles // Update the user's permissions or roles
Update(context.Context, User) error Update(context.Context, *User) error
} // FindByEmail will retrieve a user by email address.
FindByEmail(ctx context.Context, Email string) (*User, error)
} }
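A hedged, in-memory sketch of the new UsersStore contract; the commit's real store is bolt-backed, so this exists only to illustrate the interface shape. `chronograf.User`, `chronograf.UserID`, and `chronograf.ErrUserNotFound` are the types and error defined in this diff.

```go
package mem

import (
	"context"
	"sync"

	"github.com/influxdata/chronograf"
)

// UsersStore is a map-backed implementation of chronograf.UsersStore.
type UsersStore struct {
	mu     sync.Mutex
	nextID chronograf.UserID
	users  map[chronograf.UserID]chronograf.User
}

func NewUsersStore() *UsersStore {
	return &UsersStore{users: map[chronograf.UserID]chronograf.User{}}
}

func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.nextID++
	u.ID = s.nextID
	s.users[u.ID] = *u
	return u, nil
}

func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.users[u.ID]; !ok {
		return chronograf.ErrUserNotFound
	}
	delete(s.users, u.ID)
	return nil
}

func (s *UsersStore) Get(ctx context.Context, id chronograf.UserID) (*chronograf.User, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	u, ok := s.users[id]
	if !ok {
		return nil, chronograf.ErrUserNotFound
	}
	return &u, nil
}

func (s *UsersStore) Update(ctx context.Context, u *chronograf.User) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.users[u.ID]; !ok {
		return chronograf.ErrUserNotFound
	}
	s.users[u.ID] = *u
	return nil
}

func (s *UsersStore) FindByEmail(ctx context.Context, email string) (*chronograf.User, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, u := range s.users {
		if u.Email == email {
			v := u
			return &v, nil
		}
	}
	return nil, chronograf.ErrUserNotFound
}
```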
// ExplorationID is a unique ID for an Exploration. // ExplorationID is a unique ID for an Exploration.

View File

@ -2,31 +2,63 @@
machine: machine:
services: services:
- docker - docker
post: environment:
- go version DOCKER_TAG: chronograf-20161121
- go version | grep 1.7.3 || (sudo rm -rf /usr/local/go && wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz && sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz)
- go version
dependencies: dependencies:
pre: override:
- npm install -g node-sass - ./etc/scripts/docker/pull.sh
- git config --global url."git@github.com:".insteadOf "https://github.com/"
- mkdir -p ${HOME}/.go_workspace/src/github.com/influxdata
- ln -sf ${HOME}/chronograf ${HOME}/.go_workspace/src/github.com/influxdata
- "make clean":
pwd: ../.go_workspace/src/github.com/influxdata/chronograf
- "make":
pwd: ../.go_workspace/src/github.com/influxdata/chronograf
test: test:
override: override:
- make test - >
./etc/scripts/docker/run.sh
--test
--no-build
deployment: deployment:
quayio: master:
branch: master branch: master
commands: commands:
- make docker - >
./etc/scripts/docker/run.sh
--clean
--package
--platform all
--arch all
--upload
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- cp build/linux/static_amd64/chronograf .
- docker build -t chronograf .
- docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io - docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io
- docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} - docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
- docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} - docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
- mv ./build/* $CIRCLE_ARTIFACTS
pre-release:
tag: /^[0-9]+(\.[0-9]+)*(\S*)([a|rc|beta]([0-9]+))+$/
commands:
- >
./etc/scripts/docker/run.sh
--clean
--release
--package
--platform all
--arch all
--upload
--bucket dl.influxdata.com/chronograf/releases
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- mv ./build/* $CIRCLE_ARTIFACTS
release:
tag: /^[0-9]+(\.[0-9]+)*$/
commands:
- >
./etc/scripts/docker/run.sh
--clean
--release
--package
--platform all
--arch all
--upload
--bucket dl.influxdata.com/chronograf/releases
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- mv ./build/* $CIRCLE_ARTIFACTS

View File

@ -6,7 +6,7 @@ It makes owning the monitoring and alerting for your infrastructure easy to setu
The next sections will get you up and running with Chronograf with as little configuration and The next sections will get you up and running with Chronograf with as little configuration and
code as possible. code as possible.
By the end of this document you will have downloaded, installed, and configured all four packages of the By the end of this document you will have downloaded, installed, and configured all four packages of the
TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor you infrastructure. TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor your infrastructure.
## Operating System Support ## Operating System Support
Chronograf and the other components of the TICK stack are supported on a large number of operating systems and hardware architectures. Chronograf and the other components of the TICK stack are supported on a large number of operating systems and hardware architectures.

Binary file not shown (image updated: 67 KiB → 93 KiB)

9
docs/proto.md Normal file
View File

@ -0,0 +1,9 @@
Download a protoc binary from https://github.com/google/protobuf/releases/tag/v3.1.0.
Then run the following four commands, as listed at https://github.com/gogo/protobuf:
```sh
go get github.com/gogo/protobuf/proto
go get github.com/gogo/protobuf/jsonpb
go get github.com/gogo/protobuf/protoc-gen-gogo
go get github.com/gogo/protobuf/gogoproto
```
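With those installed, the Go bindings can be regenerated by running protoc with the gogo plugin from the directory containing the `.proto` file, e.g. `protoc --gogo_out=. internal.proto` (the file name and working directory here are assumptions; adjust to the repository layout).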

View File

@ -11,6 +11,7 @@ import hashlib
import re import re
import logging import logging
import argparse import argparse
import json
################ ################
#### Chronograf Variables #### Chronograf Variables
@ -147,7 +148,6 @@ def run_generate():
"""Generate static assets. """Generate static assets.
""" """
logging.info("Generating static assets...") logging.info("Generating static assets...")
run("make dep", shell=True)
run("make assets", shell=True) run("make assets", shell=True)
return True return True
@ -157,74 +157,45 @@ def go_get(branch, update=False, no_uncommitted=False):
if local_changes() and no_uncommitted: if local_changes() and no_uncommitted:
logging.error("There are uncommitted changes in the current directory.") logging.error("There are uncommitted changes in the current directory.")
return False return False
if not check_path_for("gdm"): run("make dep", shell=True)
logging.info("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
logging.info("Retrieving dependencies with `gdm`...")
sys.stdout.flush()
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH")))
return True return True
def run_tests(race, parallel, timeout, no_vet): def run_tests(race, parallel, timeout, no_vet):
"""Run the Go test suite on binary output. """Run the Go test suite on binary output.
""" """
logging.info("Starting tests...")
if race:
logging.info("Race is enabled.")
if parallel is not None:
logging.info("Using parallel: {}".format(parallel))
if timeout is not None:
logging.info("Using timeout: {}".format(timeout))
out = run("go fmt ./...")
if len(out) > 0:
logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
logging.error("{}".format(out))
return False
if not no_vet:
logging.info("Running 'go vet'...")
out = run(go_vet_command)
if len(out) > 0:
logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
logging.error("{}".format(out))
return False
else:
logging.info("Skipping 'go vet' call...")
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
logging.info("Running tests...") logging.info("Running tests...")
output = run(test_command) run("make test", shell=True, print_output=True)
logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore')))
return True return True
################ ################
#### All Chronograf-specific content above this line #### All Chronograf-specific content above this line
################ ################
def run(command, allow_failure=False, shell=False): def run(command, allow_failure=False, shell=False, print_output=False):
"""Run shell command (convenience wrapper around subprocess). """Run shell command (convenience wrapper around subprocess).
""" """
out = None out = None
logging.debug("{}".format(command)) logging.debug("{}".format(command))
try: try:
if shell: cmd = command
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) if not shell:
else: cmd = command.split()
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
if print_output:
stdout = None
p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
out, _ = p.communicate()
if out is not None:
out = out.decode('utf-8').strip() out = out.decode('utf-8').strip()
# logging.debug("Command output: {}".format(out)) if p.returncode != 0:
except subprocess.CalledProcessError as e:
if allow_failure: if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e.output)) logging.warn(u"Command '{}' failed with error: {}".format(command, out))
return None return None
else: else:
logging.error("Command '{}' failed with error: {}".format(command, e.output)) logging.error(u"Command '{}' failed with error: {}".format(command, out))
sys.exit(1) sys.exit(1)
except OSError as e: except OSError as e:
if allow_failure: if allow_failure:
@ -767,6 +738,9 @@ def main(args):
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet): if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
return 1 return 1
if args.no_build:
return 0
platforms = [] platforms = []
single_build = True single_build = True
if args.platform == 'all': if args.platform == 'all':
@ -828,10 +802,54 @@ def main(args):
args.upload_overwrite = True args.upload_overwrite = True
if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
return 1 return 1
logging.info("Packages created:") package_output = {}
package_output["version"] = args.version
for p in packages: for p in packages:
logging.info("{} (MD5={})".format(p.split('/')[-1:][0], p_name = p.split('/')[-1:][0]
generate_md5_from_file(p))) if ".asc" in p_name:
# Skip public keys
continue
arch = None
type = None
regex = None
if ".deb" in p_name:
type = "ubuntu"
regex = r"^.+_(.+)\.deb$"
elif ".rpm" in p_name:
type = "centos"
regex = r"^.+\.(.+)\.rpm$"
elif ".tar.gz" in p_name:
if "linux" in p_name:
if "static" in p_name:
type = "linux_static"
else:
type = "linux"
elif "darwin" in p_name:
type = "darwin"
regex = r"^.+_(.+)\.tar.gz$"
elif ".zip" in p_name:
if "windows" in p_name:
type = "windows"
regex = r"^.+_(.+)\.zip$"
if regex is None or type is None:
logging.error("Could not determine package type for: {}".format(p))
return 1
match = re.search(regex, p_name)
arch = match.groups()[0]
if arch is None:
logging.error("Could not determine arch for: {}".format(p))
return 1
if arch == "x86_64":
arch = "amd64"
elif arch == "x86_32":
arch = "i386"
package_output[str(arch) + "_" + str(type)] = {
"md5": generate_md5_from_file(p),
"filename": p_name,
}
logging.info(json.dumps(package_output, sort_keys=True, indent=4))
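# Worked example of the mapping above (hypothetical filename):
#   "chronograf_1.1.0_amd64.deb" -> regex r"^.+_(.+)\.deb$" captures "amd64", type "ubuntu"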
if orig_branch != get_current_branch(): if orig_branch != get_current_branch():
logging.info("Moving back to original git branch: {}".format(orig_branch)) logging.info("Moving back to original git branch: {}".format(orig_branch))
run("git checkout {}".format(orig_branch)) run("git checkout {}".format(orig_branch))
@ -964,6 +982,9 @@ if __name__ == '__main__':
metavar='<timeout>', metavar='<timeout>',
type=str, type=str,
help='Timeout for tests before failing') help='Timeout for tests before failing')
parser.add_argument('--no-build',
action='store_true',
help="Don't build anything.")
args = parser.parse_args() args = parser.parse_args()
print_banner() print_banner()
sys.exit(main(args)) sys.exit(main(args))

8
etc/scripts/docker/build.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
set -x
docker_tag="chronograf-$(date +%Y%m%d)"
docker build --rm=false -f etc/Dockerfile_build -t builder:$docker_tag .
docker tag builder:$docker_tag quay.io/influxdb/builder:$docker_tag
docker push quay.io/influxdb/builder:$docker_tag

11
etc/scripts/docker/pull.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash
#
# Pull the required build image from quay.io.
#
if [[ -z "$DOCKER_TAG" ]]; then
echo "Please specify a tag to pull from with the DOCKER_TAG env variable."
exit 1
fi
docker pull quay.io/influxdb/builder:$DOCKER_TAG

26
etc/scripts/docker/run.sh Executable file
View File

@ -0,0 +1,26 @@
#!/bin/bash
#
# Pass all CLI arguments to Chronograf builder Docker image (passing
# them to the build scripts)
#
# WARNING: This script passes your SSH and AWS credentials within the
# Docker image, so use with caution.
#
set -e
# Default SSH key to $HOME/.ssh/id_rsa if not set
test -z "$SSH_KEY_PATH" && SSH_KEY_PATH="$HOME/.ssh/id_rsa"
echo "Using SSH key located at: $SSH_KEY_PATH"
# Default docker tag if not specified
test -z "$DOCKER_TAG" && DOCKER_TAG="chronograf-20161121"
docker run \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-v $SSH_KEY_PATH:/root/.ssh/id_rsa \
-v ~/.ssh/known_hosts:/root/.ssh/known_hosts \
-v $(pwd):/root/go/src/github.com/influxdata/chronograf \
quay.io/influxdb/builder:$DOCKER_TAG \
"$@"

View File

@ -45,14 +45,14 @@ type explorations struct {
func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) { func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r) id, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
mrExs, err := h.ExplorationStore.Query(ctx, chronograf.UserID(id)) mrExs, err := h.ExplorationStore.Query(ctx, chronograf.UserID(id))
if err != nil { if err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, h.Logger)
return return
} }
@ -71,20 +71,20 @@ func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) {
func (h *Service) ExplorationsID(w http.ResponseWriter, r *http.Request) { func (h *Service) ExplorationsID(w http.ResponseWriter, r *http.Request) {
eID, err := paramID("eid", r) eID, err := paramID("eid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
uID, err := paramID("id", r) uID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID)) e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID))
if err != nil || e.UserID != chronograf.UserID(uID) { if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, eID) notFound(w, eID, h.Logger)
return return
} }
@ -101,26 +101,26 @@ type patchExplorationRequest struct {
func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) { func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
id, err := paramID("eid", r) id, err := paramID("eid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
uID, err := paramID("id", r) uID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(id)) e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(id))
if err != nil || e.UserID != chronograf.UserID(uID) { if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
var req patchExplorationRequest var req patchExplorationRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
@ -128,7 +128,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
var ok bool var ok bool
if e.Data, ok = req.Data.(string); !ok { if e.Data, ok = req.Data.(string); !ok {
err := fmt.Errorf("Error: Exploration data is not a string") err := fmt.Errorf("Error: Exploration data is not a string")
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
} }
@ -139,7 +139,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
if err := h.ExplorationStore.Update(ctx, e); err != nil { if err := h.ExplorationStore.Update(ctx, e); err != nil {
msg := "Error: Failed to update Exploration" msg := "Error: Failed to update Exploration"
Error(w, http.StatusInternalServerError, msg) Error(w, http.StatusInternalServerError, msg, h.Logger)
return return
} }
@ -156,14 +156,14 @@ type postExplorationRequest struct {
func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) { func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
uID, err := paramID("id", r) uID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
// TODO: Check if the user exists. // TODO: Check if the user exists.
var req postExplorationRequest var req postExplorationRequest
if err = json.NewDecoder(r.Body).Decode(&req); err != nil { if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
@ -182,7 +182,7 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
e, err = h.ExplorationStore.Add(ctx, e) e, err = h.ExplorationStore.Add(ctx, e)
if err != nil { if err != nil {
msg := fmt.Errorf("Error: Failed to save Exploration") msg := fmt.Errorf("Error: Failed to save Exploration")
unknownErrorWithMessage(w, msg) unknownErrorWithMessage(w, msg, h.Logger)
return return
} }
@ -195,25 +195,25 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveExploration(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveExploration(w http.ResponseWriter, r *http.Request) {
eID, err := paramID("eid", r) eID, err := paramID("eid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
uID, err := paramID("id", r) uID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID)) e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID))
if err != nil || e.UserID != chronograf.UserID(uID) { if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, eID) notFound(w, eID, h.Logger)
return return
} }
if err := h.ExplorationStore.Delete(ctx, &chronograf.Exploration{ID: chronograf.ExplorationID(eID)}); err != nil { if err := h.ExplorationStore.Delete(ctx, &chronograf.Exploration{ID: chronograf.ExplorationID(eID)}); err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, h.Logger)
return return
} }
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
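The recurring change in these handler files is threading `h.Logger` into the shared error helpers, so failures are logged server-side as well as returned to the client. A hedged sketch of what the updated helpers might look like (the names and arities match the call sites above; the bodies are illustrative, and the assumption that `chronograf.Logger` exposes an `Error` method is mine, not the commit's):

```go
package server

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/influxdata/chronograf"
)

// Error logs the message, then writes it back as a JSON error body.
func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) {
	logger.Error(msg) // assumption: chronograf.Logger has an Error method
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	_ = json.NewEncoder(w).Encode(map[string]string{"error": msg})
}

func notFound(w http.ResponseWriter, id int, logger chronograf.Logger) {
	Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), logger)
}

func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) {
	Error(w, http.StatusUnprocessableEntity, "Unparsable JSON", logger)
}

func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) {
	Error(w, http.StatusUnprocessableEntity, err.Error(), logger)
}

func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) {
	Error(w, http.StatusInternalServerError, err.Error(), logger)
}
```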

View File

@ -55,24 +55,24 @@ type kapacitor struct {
func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) { func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) {
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
_, err = h.SourcesStore.Get(ctx, srcID) _, err = h.SourcesStore.Get(ctx, srcID)
if err != nil { if err != nil {
notFound(w, srcID) notFound(w, srcID, h.Logger)
return return
} }
var req postKapacitorRequest var req postKapacitorRequest
if err = json.NewDecoder(r.Body).Decode(&req); err != nil { if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
if err := req.Valid(); err != nil { if err := req.Valid(); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
@ -86,7 +86,7 @@ func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) {
if srv, err = h.ServersStore.Add(ctx, srv); err != nil { if srv, err = h.ServersStore.Add(ctx, srv); err != nil {
msg := fmt.Errorf("Error storing kapacitor %v: %v", req, err) msg := fmt.Errorf("Error storing kapacitor %v: %v", req, err)
unknownErrorWithMessage(w, msg) unknownErrorWithMessage(w, msg, h.Logger)
return return
} }
@ -120,7 +120,7 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
mrSrvs, err := h.ServersStore.All(ctx) mrSrvs, err := h.ServersStore.All(ctx)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, "Error loading kapacitors") Error(w, http.StatusInternalServerError, "Error loading kapacitors", h.Logger)
return return
} }
@ -140,20 +140,20 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
@ -165,25 +165,25 @@ func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
if err = h.ServersStore.Delete(ctx, srv); err != nil { if err = h.ServersStore.Delete(ctx, srv); err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, h.Logger)
return return
} }
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
@ -213,31 +213,31 @@ func (p *patchKapacitorRequest) Valid() error {
func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) { func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
var req patchKapacitorRequest var req patchKapacitorRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
if err := req.Valid(); err != nil { if err := req.Valid(); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
@ -256,7 +256,7 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
if err := h.ServersStore.Update(ctx, srv); err != nil { if err := h.ServersStore.Update(ctx, srv); err != nil {
msg := fmt.Sprintf("Error updating kapacitor ID %d", id) msg := fmt.Sprintf("Error updating kapacitor ID %d", id)
Error(w, http.StatusInternalServerError, msg) Error(w, http.StatusInternalServerError, msg, h.Logger)
return return
} }
@ -268,20 +268,20 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
@ -295,7 +295,7 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
var req chronograf.AlertRule var req chronograf.AlertRule
if err = json.NewDecoder(r.Body).Decode(&req); err != nil { if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
// TODO: validate this data // TODO: validate this data
@ -308,13 +308,13 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
task, err := c.Create(ctx, req) task, err := c.Create(ctx, req)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
req.ID = task.ID req.ID = task.ID
rule, err := h.AlertRulesStore.Add(ctx, srcID, id, req) rule, err := h.AlertRulesStore.Add(ctx, srcID, id, req)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -348,20 +348,20 @@ type alertResponse struct {
func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
@ -374,7 +374,7 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
} }
var req chronograf.AlertRule var req chronograf.AlertRule
if err = json.NewDecoder(r.Body).Decode(&req); err != nil { if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
// TODO: validate this data // TODO: validate this data
@ -388,22 +388,22 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
// Check if the rule exists and is scoped correctly // Check if the rule exists and is scoped correctly
if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil { if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil {
if err == chronograf.ErrAlertNotFound { if err == chronograf.ErrAlertNotFound {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
req.ID = tid req.ID = tid
task, err := c.Update(ctx, c.Href(tid), req) task, err := c.Update(ctx, c.Href(tid), req)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
if err := h.AlertRulesStore.Update(ctx, srcID, id, req); err != nil { if err := h.AlertRulesStore.Update(ctx, srcID, id, req); err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -423,26 +423,26 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
rules, err := h.AlertRulesStore.All(ctx, srcID, id) rules, err := h.AlertRulesStore.All(ctx, srcID, id)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -454,7 +454,7 @@ func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) {
for _, rule := range rules { for _, rule := range rules {
tickscript, err := ticker.Generate(rule) tickscript, err := ticker.Generate(rule)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -476,24 +476,24 @@ type allAlertsResponse struct {
Rules []alertResponse `json:"rules"` Rules []alertResponse `json:"rules"`
} }
// KapacitorRulesGet retrieves specific task // KapacitorRulesID retrieves specific task
func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
tid := httprouter.GetParamFromContext(ctx, "tid") tid := httprouter.GetParamFromContext(ctx, "tid")
@ -501,10 +501,10 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
rule, err := h.AlertRulesStore.Get(ctx, srcID, id, tid) rule, err := h.AlertRulesStore.Get(ctx, srcID, id, tid)
if err != nil { if err != nil {
if err == chronograf.ErrAlertNotFound { if err == chronograf.ErrAlertNotFound {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -512,7 +512,7 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
c := kapa.Client{} c := kapa.Client{}
tickscript, err := ticker.Generate(rule) tickscript, err := ticker.Generate(rule)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -528,24 +528,24 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
encodeJSON(w, http.StatusOK, res, h.Logger) encodeJSON(w, http.StatusOK, res, h.Logger)
} }
// KapacitosRulesDelete proxies DELETE to kapacitor // KapacitorRulesDelete proxies DELETE to kapacitor
func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
@ -554,10 +554,10 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
// Check if the rule is linked to this server and kapacitor // Check if the rule is linked to this server and kapacitor
if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil { if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil {
if err == chronograf.ErrAlertNotFound { if err == chronograf.ErrAlertNotFound {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
@ -567,12 +567,12 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
Password: srv.Password, Password: srv.Password,
} }
if err := c.Delete(ctx, c.Href(tid)); err != nil { if err := c.Delete(ctx, c.Href(tid)); err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }
if err := h.AlertRulesStore.Delete(ctx, srcID, id, chronograf.AlertRule{ID: tid}); err != nil { if err := h.AlertRulesStore.Delete(ctx, srcID, id, chronograf.AlertRule{ID: tid}); err != nil {
Error(w, http.StatusInternalServerError, err.Error()) Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return return
} }

View File

@ -32,19 +32,19 @@ func newLayoutResponse(layout chronograf.Layout) layoutResponse {
func (h *Service) NewLayout(w http.ResponseWriter, r *http.Request) { func (h *Service) NewLayout(w http.ResponseWriter, r *http.Request) {
var layout chronograf.Layout var layout chronograf.Layout
if err := json.NewDecoder(r.Body).Decode(&layout); err != nil { if err := json.NewDecoder(r.Body).Decode(&layout); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
if err := ValidLayoutRequest(layout); err != nil { if err := ValidLayoutRequest(layout); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
var err error var err error
if layout, err = h.LayoutStore.Add(r.Context(), layout); err != nil { if layout, err = h.LayoutStore.Add(r.Context(), layout); err != nil {
msg := fmt.Errorf("Error storing layout %v: %v", layout, err) msg := fmt.Errorf("Error storing layout %v: %v", layout, err)
unknownErrorWithMessage(w, msg) unknownErrorWithMessage(w, msg, h.Logger)
return return
} }
@ -72,7 +72,7 @@ func (h *Service) Layouts(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
layouts, err := h.LayoutStore.All(ctx) layouts, err := h.LayoutStore.All(ctx)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, "Error loading layouts") Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger)
return return
} }
@ -104,7 +104,7 @@ func (h *Service) LayoutsID(w http.ResponseWriter, r *http.Request) {
layout, err := h.LayoutStore.Get(ctx, id) layout, err := h.LayoutStore.Get(ctx, id)
if err != nil { if err != nil {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id)) Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger)
return return
} }
@ -122,7 +122,7 @@ func (h *Service) RemoveLayout(w http.ResponseWriter, r *http.Request) {
} }
if err := h.LayoutStore.Delete(ctx, layout); err != nil { if err := h.LayoutStore.Delete(ctx, layout); err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, h.Logger)
return return
} }
@ -136,25 +136,25 @@ func (h *Service) UpdateLayout(w http.ResponseWriter, r *http.Request) {
_, err := h.LayoutStore.Get(ctx, id) _, err := h.LayoutStore.Get(ctx, id)
if err != nil { if err != nil {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id)) Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger)
return return
} }
var req chronograf.Layout var req chronograf.Layout
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
req.ID = id req.ID = id
if err := ValidLayoutRequest(req); err != nil { if err := ValidLayoutRequest(req); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
if err := h.LayoutStore.Update(ctx, req); err != nil { if err := h.LayoutStore.Update(ctx, req); err != nil {
msg := fmt.Sprintf("Error updating layout ID %s: %v", id, err) msg := fmt.Sprintf("Error updating layout ID %s: %v", id, err)
Error(w, http.StatusInternalServerError, msg) Error(w, http.StatusInternalServerError, msg, h.Logger)
return return
} }

View File

@ -16,7 +16,7 @@ func (h *Service) GetMappings(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
layouts, err := h.LayoutStore.All(ctx) layouts, err := h.LayoutStore.All(ctx)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, "Error loading layouts") Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger)
return return
} }

View File

@ -8,7 +8,7 @@ import (
"strings" "strings"
"github.com/bouk/httprouter" "github.com/bouk/httprouter"
"github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch "github.com/influxdata/chronograf "github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch
"github.com/influxdata/chronograf/jwt" "github.com/influxdata/chronograf/jwt"
) )
@ -94,14 +94,13 @@ func NewMux(opts MuxOpts, service Service) http.Handler {
router.DELETE("/chronograf/v1/layouts/:id", service.RemoveLayout) router.DELETE("/chronograf/v1/layouts/:id", service.RemoveLayout)
// Users // Users
/* router.GET("/chronograf/v1/me", service.Me)
router.GET("/chronograf/v1/users", Users) router.POST("/chronograf/v1/users", service.NewUser)
router.POST("/chronograf/v1/users", NewUser) router.GET("/chronograf/v1/users/:id", service.UserID)
router.GET("/chronograf/v1/users/:id", UsersID) router.PATCH("/chronograf/v1/users/:id", service.UpdateUser)
router.PATCH("/chronograf/v1/users/:id", UpdateUser) router.DELETE("/chronograf/v1/users/:id", service.RemoveUser)
router.DELETE("/chronograf/v1/users/:id", RemoveUser)
*/
// Explorations // Explorations
router.GET("/chronograf/v1/users/:id/explorations", service.Explorations) router.GET("/chronograf/v1/users/:id/explorations", service.Explorations)
router.POST("/chronograf/v1/users/:id/explorations", service.NewExploration) router.POST("/chronograf/v1/users/:id/explorations", service.NewExploration)
@ -133,7 +132,7 @@ func AuthAPI(opts MuxOpts, router *httprouter.Router) http.Handler {
opts.Logger, opts.Logger,
) )
router.GET("/oauth", gh.Login()) router.GET("/oauth/github", gh.Login())
router.GET("/oauth/logout", gh.Logout()) router.GET("/oauth/logout", gh.Logout())
router.GET("/oauth/github/callback", gh.Callback()) router.GET("/oauth/github/callback", gh.Callback())
@ -152,44 +151,45 @@ func encodeJSON(w http.ResponseWriter, status int, v interface{}, logger chronog
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status) w.WriteHeader(status)
if err := json.NewEncoder(w).Encode(v); err != nil { if err := json.NewEncoder(w).Encode(v); err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, logger)
} }
} }
// Error writes a JSON message // Error writes a JSON message
func Error(w http.ResponseWriter, code int, msg string) { func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) {
e := struct { e := ErrorMessage{
Code int `json:"code"`
Message string `json:"message"`
}{
Code: code, Code: code,
Message: msg, Message: msg,
} }
b, err := json.Marshal(e) b, err := json.Marshal(e)
if err != nil { if err != nil {
//log.Print("go-oidc: failed to marshal %#v: %v", e, err)
code = http.StatusInternalServerError code = http.StatusInternalServerError
b = []byte(`{"code": 500, "message":"server_error"}`) b = []byte(`{"code": 500, "message":"server_error"}`)
} }
logger.
WithField("component", "server").
WithField("http_status ", code).
Error("Error message ", msg)
w.Header().Set("Content-Type", JSONType) w.Header().Set("Content-Type", JSONType)
w.WriteHeader(code) w.WriteHeader(code)
w.Write(b) w.Write(b)
} }
func invalidData(w http.ResponseWriter, err error) { func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) {
Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err)) Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err), logger)
} }
func invalidJSON(w http.ResponseWriter) { func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) {
Error(w, http.StatusBadRequest, "Unparsable JSON") Error(w, http.StatusBadRequest, "Unparsable JSON", logger)
} }
func unknownErrorWithMessage(w http.ResponseWriter, err error) { func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) {
Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err)) Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err), logger)
} }
func notFound(w http.ResponseWriter, id int) { func notFound(w http.ResponseWriter, id int, logger chronograf.Logger) {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id)) Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), logger)
} }
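Taken together, every helper above now threads the service logger through to Error, so each 4xx/5xx response is logged where it is produced. A minimal sketch of how a handler uses them — ExampleID is a hypothetical handler; only Error, notFound, paramID, and h.Logger come from this file:

func (h *Service) ExampleID(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r) // parse the :id route parameter
	if err != nil {
		// malformed IDs are reported as 422 and logged via h.Logger
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	// a record that cannot be found is reported as 404 and logged the same way
	notFound(w, id, h.Logger)
}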
func paramID(key string, r *http.Request) (int, error) { func paramID(key string, r *http.Request) (int, error) {

View File

@ -26,30 +26,30 @@ type postProxyResponse struct {
func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) { func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r) id, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
var req chronograf.Query var req chronograf.Query
if err = json.NewDecoder(r.Body).Decode(&req); err != nil { if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
if err = ValidProxyRequest(req); err != nil { if err = ValidProxyRequest(req); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id) src, err := h.SourcesStore.Get(ctx, id)
if err != nil { if err != nil {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
if err = h.TimeSeries.Connect(ctx, &src); err != nil { if err = h.TimeSeries.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d", id) msg := fmt.Sprintf("Unable to connect to source %d", id)
Error(w, http.StatusBadRequest, msg) Error(w, http.StatusBadRequest, msg, h.Logger)
return return
} }
@ -57,11 +57,11 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
if err != nil { if err != nil {
if err == chronograf.ErrUpstreamTimeout { if err == chronograf.ErrUpstreamTimeout {
msg := "Timeout waiting for Influx response" msg := "Timeout waiting for Influx response"
Error(w, http.StatusRequestTimeout, msg) Error(w, http.StatusRequestTimeout, msg, h.Logger)
return return
} }
// TODO: Here I want to return the error code from influx. // TODO: Here I want to return the error code from influx.
Error(w, http.StatusBadRequest, err.Error()) Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return return
} }
@ -75,33 +75,33 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorProxy(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorProxy(w http.ResponseWriter, r *http.Request) {
srcID, err := paramID("id", r) srcID, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
id, err := paramID("kid", r) id, err := paramID("kid", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
path := r.URL.Query().Get("path") path := r.URL.Query().Get("path")
if path == "" { if path == "" {
Error(w, http.StatusUnprocessableEntity, "path query parameter required") Error(w, http.StatusUnprocessableEntity, "path query parameter required", h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id) srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID { if err != nil || srv.SrcID != srcID {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
u, err := url.Parse(srv.URL) u, err := url.Parse(srv.URL)
if err != nil { if err != nil {
msg := fmt.Sprintf("Error parsing kapacitor url: %v", err) msg := fmt.Sprintf("Error parsing kapacitor url: %v", err)
Error(w, http.StatusUnprocessableEntity, msg) Error(w, http.StatusUnprocessableEntity, msg, h.Logger)
return return
} }

View File

@ -11,6 +11,7 @@ type getRoutesResponse struct {
Mappings string `json:"mappings"` // Location of the application mappings endpoint Mappings string `json:"mappings"` // Location of the application mappings endpoint
Sources string `json:"sources"` // Location of the sources endpoint Sources string `json:"sources"` // Location of the sources endpoint
Users string `json:"users"` // Location of the users endpoint Users string `json:"users"` // Location of the users endpoint
Me string `json:"me"` // Location of the me endpoint
} }
// AllRoutes returns all top level routes within chronograf // AllRoutes returns all top level routes within chronograf
@ -19,6 +20,7 @@ func AllRoutes(logger chronograf.Logger) http.HandlerFunc {
Sources: "/chronograf/v1/sources", Sources: "/chronograf/v1/sources",
Layouts: "/chronograf/v1/layouts", Layouts: "/chronograf/v1/layouts",
Users: "/chronograf/v1/users", Users: "/chronograf/v1/users",
Me: "/chronograf/v1/me",
Mappings: "/chronograf/v1/mappings", Mappings: "/chronograf/v1/mappings",
} }
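With the new Me entry, the routes index gains a me link — a sketch of the payload getRoutesResponse would encode; the Layouts field and its json tag are not shown in this excerpt and are assumed:

routes := getRoutesResponse{
	Layouts:  "/chronograf/v1/layouts",
	Mappings: "/chronograf/v1/mappings",
	Sources:  "/chronograf/v1/sources",
	Users:    "/chronograf/v1/users",
	Me:       "/chronograf/v1/me",
}
// json.NewEncoder(w).Encode(routes) would emit:
// {"layouts":"/chronograf/v1/layouts","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","users":"/chronograf/v1/users","me":"/chronograf/v1/me"}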

View File

@ -47,6 +47,7 @@ type Server struct {
handler http.Handler handler http.Handler
} }
// BuildInfo is sent to the usage client to track versions and commits
type BuildInfo struct { type BuildInfo struct {
Version string Version string
Commit string Commit string
@ -59,7 +60,7 @@ func (s *Server) useAuth() bool {
// Serve starts and runs the chronograf server // Serve starts and runs the chronograf server
func (s *Server) Serve() error { func (s *Server) Serve() error {
logger := clog.New(clog.ParseLevel(s.LogLevel)) logger := clog.New(clog.ParseLevel(s.LogLevel))
service := openService(s.BoltPath, s.CannedPath, logger) service := openService(s.BoltPath, s.CannedPath, logger, s.useAuth())
s.handler = NewMux(MuxOpts{ s.handler = NewMux(MuxOpts{
Develop: s.Develop, Develop: s.Develop,
TokenSecret: s.TokenSecret, TokenSecret: s.TokenSecret,
@ -105,7 +106,7 @@ func (s *Server) Serve() error {
return nil return nil
} }
func openService(boltPath, cannedPath string, logger chronograf.Logger) Service { func openService(boltPath, cannedPath string, logger chronograf.Logger, useAuth bool) Service {
db := bolt.NewClient() db := bolt.NewClient()
db.Path = boltPath db.Path = boltPath
if err := db.Open(); err != nil { if err := db.Open(); err != nil {
@ -136,11 +137,14 @@ func openService(boltPath, cannedPath string, logger chronograf.Logger) Service
ExplorationStore: db.ExplorationStore, ExplorationStore: db.ExplorationStore,
SourcesStore: db.SourcesStore, SourcesStore: db.SourcesStore,
ServersStore: db.ServersStore, ServersStore: db.ServersStore,
UsersStore: db.UsersStore,
TimeSeries: &influx.Client{ TimeSeries: &influx.Client{
Logger: logger, Logger: logger,
}, },
LayoutStore: layouts, LayoutStore: layouts,
AlertRulesStore: db.AlertsStore, AlertRulesStore: db.AlertsStore,
Logger: logger,
UseAuth: useAuth,
} }
} }

View File

@ -9,6 +9,14 @@ type Service struct {
ServersStore chronograf.ServersStore ServersStore chronograf.ServersStore
LayoutStore chronograf.LayoutStore LayoutStore chronograf.LayoutStore
AlertRulesStore chronograf.AlertRulesStore AlertRulesStore chronograf.AlertRulesStore
UsersStore chronograf.UsersStore
TimeSeries chronograf.TimeSeries TimeSeries chronograf.TimeSeries
Logger chronograf.Logger Logger chronograf.Logger
UseAuth bool
}
// ErrorMessage is the error response format for all service errors
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`
} }
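ErrorMessage marshals to the same wire shape as the hand-written fallback bytes in Error — a quick sketch, assuming only encoding/json and fmt from the standard library:

b, _ := json.Marshal(ErrorMessage{Code: 404, Message: "ID 1 not found"})
fmt.Println(string(b)) // {"code":404,"message":"ID 1 not found"}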

View File

@ -21,6 +21,11 @@ type sourceResponse struct {
} }
func newSourceResponse(src chronograf.Source) sourceResponse { func newSourceResponse(src chronograf.Source) sourceResponse {
// If telegraf is not set, we'll set it to the default value.
if src.Telegraf == "" {
src.Telegraf = "telegraf"
}
httpAPISrcs := "/chronograf/v1/sources" httpAPISrcs := "/chronograf/v1/sources"
return sourceResponse{ return sourceResponse{
Source: src, Source: src,
@ -36,18 +41,24 @@ func newSourceResponse(src chronograf.Source) sourceResponse {
func (h *Service) NewSource(w http.ResponseWriter, r *http.Request) { func (h *Service) NewSource(w http.ResponseWriter, r *http.Request) {
var src chronograf.Source var src chronograf.Source
if err := json.NewDecoder(r.Body).Decode(&src); err != nil { if err := json.NewDecoder(r.Body).Decode(&src); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
if err := ValidSourceRequest(src); err != nil { if err := ValidSourceRequest(src); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
// By default the telegraf database will be telegraf
if src.Telegraf == "" {
src.Telegraf = "telegraf"
}
var err error var err error
if src, err = h.SourcesStore.Add(r.Context(), src); err != nil { if src, err = h.SourcesStore.Add(r.Context(), src); err != nil {
msg := fmt.Errorf("Error storing source %v: %v", src, err) msg := fmt.Errorf("Error storing source %v: %v", src, err)
unknownErrorWithMessage(w, msg) unknownErrorWithMessage(w, msg, h.Logger)
return return
} }
@ -65,7 +76,7 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
srcs, err := h.SourcesStore.All(ctx) srcs, err := h.SourcesStore.All(ctx)
if err != nil { if err != nil {
Error(w, http.StatusInternalServerError, "Error loading sources") Error(w, http.StatusInternalServerError, "Error loading sources", h.Logger)
return return
} }
@ -84,14 +95,14 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) {
func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) { func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r) id, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id) src, err := h.SourcesStore.Get(ctx, id)
if err != nil { if err != nil {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
@ -103,14 +114,14 @@ func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r) id, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
src := chronograf.Source{ID: id} src := chronograf.Source{ID: id}
ctx := r.Context() ctx := r.Context()
if err = h.SourcesStore.Delete(ctx, src); err != nil { if err = h.SourcesStore.Delete(ctx, src); err != nil {
unknownErrorWithMessage(w, err) unknownErrorWithMessage(w, err, h.Logger)
return return
} }
@ -121,20 +132,20 @@ func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) {
func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) { func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r) id, err := paramID("id", r)
if err != nil { if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error()) Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return return
} }
ctx := r.Context() ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id) src, err := h.SourcesStore.Get(ctx, id)
if err != nil { if err != nil {
notFound(w, id) notFound(w, id, h.Logger)
return return
} }
var req chronograf.Source var req chronograf.Source
if err := json.NewDecoder(r.Body).Decode(&req); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w) invalidJSON(w, h.Logger)
return return
} }
@ -154,15 +165,18 @@ func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) {
if req.Type != "" { if req.Type != "" {
src.Type = req.Type src.Type = req.Type
} }
if req.Telegraf != "" {
src.Telegraf = req.Telegraf
}
if err := ValidSourceRequest(src); err != nil { if err := ValidSourceRequest(src); err != nil {
invalidData(w, err) invalidData(w, err, h.Logger)
return return
} }
if err := h.SourcesStore.Update(ctx, src); err != nil { if err := h.SourcesStore.Update(ctx, src); err != nil {
msg := fmt.Sprintf("Error updating source ID %d", id) msg := fmt.Sprintf("Error updating source ID %d", id)
Error(w, http.StatusInternalServerError, msg) Error(w, http.StatusInternalServerError, msg, h.Logger)
return return
} }
encodeJSON(w, http.StatusOK, newSourceResponse(src), h.Logger) encodeJSON(w, http.StatusOK, newSourceResponse(src), h.Logger)

58
server/sources_test.go Normal file
View File

@ -0,0 +1,58 @@
package server
import (
"reflect"
"testing"
"github.com/influxdata/chronograf"
)
func Test_newSourceResponse(t *testing.T) {
tests := []struct {
name string
src chronograf.Source
want sourceResponse
}{
{
name: "Test empty telegraf",
src: chronograf.Source{
ID: 1,
Telegraf: "",
},
want: sourceResponse{
Source: chronograf.Source{
ID: 1,
Telegraf: "telegraf",
},
Links: sourceLinks{
Self: "/chronograf/v1/sources/1",
Proxy: "/chronograf/v1/sources/1/proxy",
Kapacitors: "/chronograf/v1/sources/1/kapacitors",
},
},
},
{
name: "Test non-default telegraf",
src: chronograf.Source{
ID: 1,
Telegraf: "howdy",
},
want: sourceResponse{
Source: chronograf.Source{
ID: 1,
Telegraf: "howdy",
},
Links: sourceLinks{
Self: "/chronograf/v1/sources/1",
Proxy: "/chronograf/v1/sources/1/proxy",
Kapacitors: "/chronograf/v1/sources/1/kapacitors",
},
},
},
}
for _, tt := range tests {
if got := newSourceResponse(tt.src); !reflect.DeepEqual(got, tt.want) {
t.Errorf("%q. newSourceResponse() = %v, want %v", tt.name, got, tt.want)
}
}
}
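These table-driven cases can be run on their own with go test ./server -run Test_newSourceResponse from the repository root.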

View File

@ -57,14 +57,16 @@
}, },
"post": { "post": {
"summary": "Create new data source", "summary": "Create new data source",
"parameters": [{ "parameters": [
{
"name": "source", "name": "source",
"in": "body", "in": "body",
"description": "Configuration options for data source", "description": "Configuration options for data source",
"schema": { "schema": {
"$ref": "#/definitions/Source" "$ref": "#/definitions/Source"
} }
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully create data source", "description": "Successfully create data source",
@ -90,13 +92,15 @@
}, },
"/sources/{id}": { "/sources/{id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the data source", "description": "ID of the data source",
"required": true "required": true
}], }
],
"summary": "Configured data sources", "summary": "Configured data sources",
"description": "These data sources store time series data.", "description": "These data sources store time series data.",
"responses": { "responses": {
@ -122,13 +126,15 @@
}, },
"patch": { "patch": {
"summary": "Update data source configuration", "summary": "Update data source configuration",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of a data source", "description": "ID of a data source",
"required": true "required": true
}, { },
{
"name": "config", "name": "config",
"in": "body", "in": "body",
"description": "data source configuration", "description": "data source configuration",
@ -136,7 +142,8 @@
"$ref": "#/definitions/Source" "$ref": "#/definitions/Source"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Data source's configuration was changed", "description": "Data source's configuration was changed",
@ -159,13 +166,15 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}], }
],
"summary": "This specific data source will be removed from the data store", "summary": "This specific data source will be removed from the data store",
"responses": { "responses": {
"204": { "204": {
@ -189,13 +198,15 @@
"/sources/{id}/proxy": { "/sources/{id}/proxy": {
"post": { "post": {
"description": "Query the backend time series data source and return the response according to `format`", "description": "Query the backend time series data source and return the response according to `format`",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the data source", "description": "ID of the data source",
"required": true "required": true
}, { },
{
"name": "query", "name": "query",
"in": "body", "in": "body",
"description": "Query Parameters", "description": "Query Parameters",
@ -203,7 +214,8 @@
"$ref": "#/definitions/Proxy" "$ref": "#/definitions/Proxy"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Result of the query from the backend time series data source.", "description": "Result of the query from the backend time series data source.",
@ -258,14 +270,16 @@
}, },
"post": { "post": {
"summary": "Create new user for this data source", "summary": "Create new user for this data source",
"parameters": [{ "parameters": [
{
"name": "user", "name": "user",
"in": "body", "in": "body",
"description": "Configuration options for new user", "description": "Configuration options for new user",
"schema": { "schema": {
"$ref": "#/definitions/User" "$ref": "#/definitions/User"
} }
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully created new user", "description": "Successfully created new user",
@ -291,13 +305,15 @@
}, },
"/users/{user_id}": { "/users/{user_id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific user", "description": "ID of the specific user",
"required": true "required": true
}], }
],
"summary": "Returns information about a specific user", "summary": "Returns information about a specific user",
"description": "Specific User.\n", "description": "Specific User.\n",
"responses": { "responses": {
@ -323,13 +339,15 @@
}, },
"patch": { "patch": {
"summary": "Update user configuration", "summary": "Update user configuration",
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific user", "description": "ID of the specific user",
"required": true "required": true
}, { },
{
"name": "config", "name": "config",
"in": "body", "in": "body",
"description": "user configuration", "description": "user configuration",
@ -337,7 +355,8 @@
"$ref": "#/definitions/User" "$ref": "#/definitions/User"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Users's configuration was changed", "description": "Users's configuration was changed",
@ -360,13 +379,15 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific user", "description": "ID of the specific user",
"required": true "required": true
}], }
],
"summary": "This specific user will be removed from the data store", "summary": "This specific user will be removed from the data store",
"responses": { "responses": {
"204": { "204": {
@ -389,13 +410,15 @@
}, },
"/users/{user_id}/explorations": { "/users/{user_id}/explorations": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "All Data Explorations returned only for this user.", "description": "All Data Explorations returned only for this user.",
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Data Explorations saved sessions for user are returned.", "description": "Data Explorations saved sessions for user are returned.",
@ -419,20 +442,23 @@
}, },
"post": { "post": {
"summary": "Create new named exploration for this user", "summary": "Create new named exploration for this user",
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of user to associate this exploration with.", "description": "ID of user to associate this exploration with.",
"required": true "required": true
}, { },
{
"name": "exploration", "name": "exploration",
"in": "body", "in": "body",
"description": "Exploration session to save", "description": "Exploration session to save",
"schema": { "schema": {
"$ref": "#/definitions/Exploration" "$ref": "#/definitions/Exploration"
} }
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully created new Exploration session", "description": "Successfully created new Exploration session",
@ -464,19 +490,22 @@
}, },
"/users/{user_id}/explorations/{exploration_id}": { "/users/{user_id}/explorations/{exploration_id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of user to associate this exploration with.", "description": "ID of user to associate this exploration with.",
"required": true "required": true
}, { },
{
"name": "exploration_id", "name": "exploration_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific exploration.", "description": "ID of the specific exploration.",
"required": true "required": true
}], }
],
"summary": "Returns the specified data exploration session", "summary": "Returns the specified data exploration session",
"description": "A data exploration session specifies query information.\n", "description": "A data exploration session specifies query information.\n",
"responses": { "responses": {
@ -502,19 +531,22 @@
}, },
"patch": { "patch": {
"summary": "Update exploration configuration", "summary": "Update exploration configuration",
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of user", "description": "ID of user",
"required": true "required": true
}, { },
{
"name": "exploration_id", "name": "exploration_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific exploration.", "description": "ID of the specific exploration.",
"required": true "required": true
}, { },
{
"name": "exploration", "name": "exploration",
"in": "body", "in": "body",
"description": "Update the exploration information to this.", "description": "Update the exploration information to this.",
@ -522,7 +554,8 @@
"schema": { "schema": {
"$ref": "#/definitions/Exploration" "$ref": "#/definitions/Exploration"
} }
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Exploration's configuration was changed", "description": "Exploration's configuration was changed",
@ -545,19 +578,22 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "user_id", "name": "user_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of user to associate this exploration with.", "description": "ID of user to associate this exploration with.",
"required": true "required": true
}, { },
{
"name": "exploration_id", "name": "exploration_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the specific exploration.", "description": "ID of the specific exploration.",
"required": true "required": true
}], }
],
"summary": "This specific exporer session will be removed.", "summary": "This specific exporer session will be removed.",
"responses": { "responses": {
"204": { "204": {
@ -580,13 +616,15 @@
}, },
"/sources/{id}/kapacitors": { "/sources/{id}/kapacitors": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}], }
],
"summary": "Configured kapacitors", "summary": "Configured kapacitors",
"responses": { "responses": {
"200": { "200": {
@ -605,20 +643,23 @@
}, },
"post": { "post": {
"summary": "Create new kapacitor backend", "summary": "Create new kapacitor backend",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapacitor", "name": "kapacitor",
"in": "body", "in": "body",
"description": "Configuration options for kapacitor", "description": "Configuration options for kapacitor",
"schema": { "schema": {
"$ref": "#/definitions/Kapacitor" "$ref": "#/definitions/Kapacitor"
} }
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully created kapacitor source", "description": "Successfully created kapacitor source",
@ -644,19 +685,22 @@
}, },
"/sources/{id}/kapacitors/{kapa_id}": { "/sources/{id}/kapacitors/{kapa_id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor", "description": "ID of the kapacitor",
"required": true "required": true
}], }
],
"summary": "Configured kapacitors", "summary": "Configured kapacitors",
"description": "These kapacitors are used for monitoring and alerting.", "description": "These kapacitors are used for monitoring and alerting.",
"responses": { "responses": {
@ -682,19 +726,22 @@
}, },
"patch": { "patch": {
"summary": "Update kapacitor configuration", "summary": "Update kapacitor configuration",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of a kapacitor backend", "description": "ID of a kapacitor backend",
"required": true "required": true
}, { },
{
"name": "config", "name": "config",
"in": "body", "in": "body",
"description": "kapacitor configuration", "description": "kapacitor configuration",
@ -702,7 +749,8 @@
"$ref": "#/definitions/Kapacitor" "$ref": "#/definitions/Kapacitor"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Kapacitor's configuration was changed", "description": "Kapacitor's configuration was changed",
@ -725,19 +773,22 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor", "description": "ID of the kapacitor",
"required": true "required": true
}], }
],
"summary": "This specific kapacitor will be removed.", "summary": "This specific kapacitor will be removed.",
"responses": { "responses": {
"204": { "204": {
@ -761,19 +812,22 @@
"/sources/{id}/kapacitors/{kapa_id}/tasks": { "/sources/{id}/kapacitors/{kapa_id}/tasks": {
"get": { "get": {
"description": "Get all defined alert tasks.", "description": "Get all defined alert tasks.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "All alert tasks for this specific kapacitor are returned", "description": "All alert tasks for this specific kapacitor are returned",
@ -797,19 +851,22 @@
}, },
"post": { "post": {
"description": "Create kapacitor alert task", "description": "Create kapacitor alert task",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}, { },
{
"name": "task", "name": "task",
"in": "body", "in": "body",
"description": "Rule to generate alert task", "description": "Rule to generate alert task",
@ -817,7 +874,8 @@
"$ref": "#/definitions/Task" "$ref": "#/definitions/Task"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully created new kapacitor alert task", "description": "Successfully created new kapacitor alert task",
@ -849,25 +907,29 @@
}, },
"/sources/{id}/kapacitors/{kapa_id}/tasks/{task_id}": { "/sources/{id}/kapacitors/{kapa_id}/tasks/{task_id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor", "description": "ID of the kapacitor",
"required": true "required": true
}, { },
{
"name": "task_id", "name": "task_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the task", "description": "ID of the task",
"required": true "required": true
}], }
],
"summary": "Specific kapacitor alert task", "summary": "Specific kapacitor alert task",
"description": "Alerting task for kapacitor", "description": "Alerting task for kapacitor",
"responses": { "responses": {
@ -893,26 +955,28 @@
}, },
"put": { "put": {
"summary": "Update rule alert task configuration", "summary": "Update rule alert task configuration",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of a kapacitor backend", "description": "ID of a kapacitor backend",
"required": true "required": true
}, { },
{
"name": "task_id", "name": "task_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of a task", "description": "ID of a task",
"required": true "required": true
}, },
{ {
"name": "task", "name": "task",
"in": "body", "in": "body",
@ -945,26 +1009,28 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor", "description": "ID of the kapacitor",
"required": true "required": true
}, { },
{
"name": "task_id", "name": "task_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the task", "description": "ID of the task",
"required": true "required": true
} }
], ],
"summary": "This specific alert task will be removed.", "summary": "This specific alert task will be removed.",
"responses": { "responses": {
@ -989,25 +1055,29 @@
"/sources/{id}/kapacitors/{kapa_id}/proxy": { "/sources/{id}/kapacitors/{kapa_id}/proxy": {
"get": { "get": {
"description": "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.", "description": "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}, { },
{
"name": "path", "name": "path",
"in": "query", "in": "query",
"type": "string", "type": "string",
"description": "The kapacitor API path to use in the proxy redirect", "description": "The kapacitor API path to use in the proxy redirect",
"required": true "required": true
}], }
],
"responses": { "responses": {
"204": { "204": {
"description": "Kapacitor returned no content" "description": "Kapacitor returned no content"
@ -1028,25 +1098,29 @@
}, },
"delete": { "delete": {
"description": "DELETE to `path` of kapacitor. The response and status code from kapacitor is directly returned.", "description": "DELETE to `path` of kapacitor. The response and status code from kapacitor is directly returned.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}, { },
{
"name": "path", "name": "path",
"in": "query", "in": "query",
"type": "string", "type": "string",
"description": "The kapacitor API path to use in the proxy redirect", "description": "The kapacitor API path to use in the proxy redirect",
"required": true "required": true
}], }
],
"responses": { "responses": {
"204": { "204": {
"description": "Kapacitor returned no content" "description": "Kapacitor returned no content"
@ -1067,25 +1141,29 @@
}, },
"patch": { "patch": {
"description": "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", "description": "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}, { },
{
"name": "path", "name": "path",
"in": "query", "in": "query",
"type": "string", "type": "string",
"description": "The kapacitor API path to use in the proxy redirect", "description": "The kapacitor API path to use in the proxy redirect",
"required": true "required": true
}, { },
{
"name": "query", "name": "query",
"in": "body", "in": "body",
"description": "Kapacitor body", "description": "Kapacitor body",
@ -1093,7 +1171,8 @@
"$ref": "#/definitions/KapacitorProxy" "$ref": "#/definitions/KapacitorProxy"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"204": { "204": {
"description": "Kapacitor returned no content" "description": "Kapacitor returned no content"
@ -1114,25 +1193,29 @@
}, },
"post": { "post": {
"description": "POST body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", "description": "POST body directly to configured kapacitor. The response and status code from kapacitor is directly returned.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the source", "description": "ID of the source",
"required": true "required": true
}, { },
{
"name": "kapa_id", "name": "kapa_id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the kapacitor backend.", "description": "ID of the kapacitor backend.",
"required": true "required": true
}, { },
{
"name": "path", "name": "path",
"in": "query", "in": "query",
"type": "string", "type": "string",
"description": "The kapacitor API path to use in the proxy redirect", "description": "The kapacitor API path to use in the proxy redirect",
"required": true "required": true
}, { },
{
"name": "query", "name": "query",
"in": "body", "in": "body",
"description": "Kapacitor body", "description": "Kapacitor body",
@ -1140,7 +1223,8 @@
"$ref": "#/definitions/KapacitorProxy" "$ref": "#/definitions/KapacitorProxy"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"204": { "204": {
"description": "Kapacitor returned no content" "description": "Kapacitor returned no content"
@ -1183,7 +1267,8 @@
"/layouts": { "/layouts": {
"get": { "get": {
"summary": "Pre-configured layouts", "summary": "Pre-configured layouts",
"parameters": [{ "parameters": [
{
"name": "measurement", "name": "measurement",
"in": "query", "in": "query",
"description": "Returns layouts with this measurement", "description": "Returns layouts with this measurement",
@ -1193,7 +1278,8 @@
"type": "string" "type": "string"
}, },
"collectionFormat": "multi" "collectionFormat": "multi"
}, { },
{
"name": "app", "name": "app",
"in": "query", "in": "query",
"description": "Returns layouts with this app", "description": "Returns layouts with this app",
@ -1203,7 +1289,8 @@
"type": "string" "type": "string"
}, },
"collectionFormat": "multi" "collectionFormat": "multi"
}], }
],
"description": "Layouts are a collection of `Cells` that visualize time-series data.\n", "description": "Layouts are a collection of `Cells` that visualize time-series data.\n",
"responses": { "responses": {
"200": { "200": {
@ -1222,14 +1309,16 @@
}, },
"post": { "post": {
"summary": "Create new layout", "summary": "Create new layout",
"parameters": [{ "parameters": [
{
"name": "layout", "name": "layout",
"in": "body", "in": "body",
"description": "Defines the layout and queries of the cells within the layout.", "description": "Defines the layout and queries of the cells within the layout.",
"schema": { "schema": {
"$ref": "#/definitions/Layout" "$ref": "#/definitions/Layout"
} }
}], }
],
"responses": { "responses": {
"201": { "201": {
"description": "Successfully created new layout", "description": "Successfully created new layout",
@ -1255,13 +1344,15 @@
}, },
"/layouts/{id}": { "/layouts/{id}": {
"get": { "get": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the layout", "description": "ID of the layout",
"required": true "required": true
}], }
],
"summary": "Specific pre-configured layout containing cells and queries.", "summary": "Specific pre-configured layout containing cells and queries.",
"description": "layouts will hold information about how to layout the page of graphs.\n", "description": "layouts will hold information about how to layout the page of graphs.\n",
"responses": { "responses": {
@ -1286,13 +1377,15 @@
} }
}, },
"delete": { "delete": {
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of the layout", "description": "ID of the layout",
"required": true "required": true
}], }
],
"summary": "This specific layout will be removed from the data store", "summary": "This specific layout will be removed from the data store",
"responses": { "responses": {
"204": { "204": {
@ -1314,13 +1407,15 @@
}, },
"put": { "put": {
"summary": "Replace layout configuration.", "summary": "Replace layout configuration.",
"parameters": [{ "parameters": [
{
"name": "id", "name": "id",
"in": "path", "in": "path",
"type": "string", "type": "string",
"description": "ID of a layout", "description": "ID of a layout",
"required": true "required": true
}, { },
{
"name": "config", "name": "config",
"in": "body", "in": "body",
"description": "layout configuration update parameters", "description": "layout configuration update parameters",
@ -1328,7 +1423,8 @@
"$ref": "#/definitions/Layout" "$ref": "#/definitions/Layout"
}, },
"required": true "required": true
}], }
],
"responses": { "responses": {
"200": { "200": {
"description": "Layout has been replaced and the new layout is returned.", "description": "Layout has been replaced and the new layout is returned.",
@ -1481,6 +1577,11 @@
"type": "boolean", "type": "boolean",
"description": "Indicates whether this source is the default source" "description": "Indicates whether this source is the default source"
}, },
"telegraf": {
"type": "string",
"description": "Database where telegraf information is stored for this source",
"default": "telegraf"
},
"links": { "links": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -1711,7 +1812,32 @@
"description": "Time-series data queries for Cell.", "description": "Time-series data queries for Cell.",
"type": "array", "type": "array",
"items": { "items": {
"$ref": "#/definitions/Proxy" "$ref": "#/definitions/LayoutQuery"
}
}
}
},
"LayoutQuery": {
"type": "object",
"required": [
"query"
],
"properties": {
"query": {
"type": "string"
},
"wheres": {
"description": "Defines the condition clauses for influxdb",
"type": "array",
"items": {
"type": "string"
}
},
"groupbys": {
"description": "Defines the group by clauses for influxdb",
"type": "array",
"items": {
"type": "string"
} }
} }
} }
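On the Go side, a cell query matching the LayoutQuery schema could be modeled as below — an illustrative sketch inferred from the definition above; the struct and its tags are not code from this commit:

type LayoutQuery struct {
	Query    string   `json:"query"`              // InfluxQL text; the only required property
	Wheres   []string `json:"wheres,omitempty"`   // condition clauses for influxdb
	GroupBys []string `json:"groupbys,omitempty"` // group by clauses for influxdb
}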

180
server/users.go Normal file
View File

@ -0,0 +1,180 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"golang.org/x/net/context"
"github.com/influxdata/chronograf"
)
type userLinks struct {
Self string `json:"self"` // Self link mapping to this resource
Explorations string `json:"explorations"` // URL for explorations endpoint
}
type userResponse struct {
*chronograf.User
Links userLinks `json:"links"`
}
func newUserResponse(usr *chronograf.User) userResponse {
base := "/chronograf/v1/users"
return userResponse{
User: usr,
Links: userLinks{
Self: fmt.Sprintf("%s/%d", base, usr.ID),
Explorations: fmt.Sprintf("%s/%d/explorations", base, usr.ID),
},
}
}
// NewUser adds a new valid user to the store
func (h *Service) NewUser(w http.ResponseWriter, r *http.Request) {
usr := &chronograf.User{} // decode target must be non-nil; decoding into a nil *chronograf.User fails
if err := json.NewDecoder(r.Body).Decode(usr); err != nil {
invalidJSON(w, h.Logger)
return
}
if err := ValidUserRequest(usr); err != nil {
invalidData(w, err, h.Logger)
return
}
var err error
if usr, err = h.UsersStore.Add(r.Context(), usr); err != nil {
msg := fmt.Errorf("error storing user %v: %v", *usr, err)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
res := newUserResponse(usr)
w.Header().Add("Location", res.Links.Self)
encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// UserID retrieves a user from the store
func (h *Service) UserID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
usr, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
if err != nil {
notFound(w, id, h.Logger)
return
}
res := newUserResponse(usr)
encodeJSON(w, http.StatusOK, res, h.Logger)
}
// RemoveUser deletes the user from the store
func (h *Service) RemoveUser(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
usr := &chronograf.User{ID: chronograf.UserID(id)}
ctx := r.Context()
if err = h.UsersStore.Delete(ctx, usr); err != nil {
unknownErrorWithMessage(w, err, h.Logger)
return
}
w.WriteHeader(http.StatusNoContent)
}
// UpdateUser handles incremental updates of a data user
func (h *Service) UpdateUser(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
usr, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
if err != nil {
notFound(w, id, h.Logger)
return
}
var req chronograf.User
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w, h.Logger)
return
}
usr.Email = req.Email
if err := ValidUserRequest(usr); err != nil {
invalidData(w, err, h.Logger)
return
}
if err := h.UsersStore.Update(ctx, usr); err != nil {
msg := fmt.Sprintf("Error updating user ID %d", id)
Error(w, http.StatusInternalServerError, msg, h.Logger)
return
}
encodeJSON(w, http.StatusOK, newUserResponse(usr), h.Logger)
}
// ValidUserRequest checks if email is nonempty
func ValidUserRequest(s *chronograf.User) error {
// email is required
if s.Email == "" {
return fmt.Errorf("Email required")
}
return nil
}
func getEmail(ctx context.Context) (string, error) {
principal, ok := ctx.Value(chronograf.PrincipalKey).(chronograf.Principal)
if !ok || principal == "" { // comma-ok form: a bare assertion would panic when the context carries no principal
return "", fmt.Errorf("Token not found")
}
return string(principal), nil
}
// Me does a findOrCreate based on the email in the context
func (h *Service) Me(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if !h.UseAuth {
Error(w, http.StatusTeapot, fmt.Sprintf("%v", "Go to line 151 users.go. Look for Arnold"), h.Logger)
_ = 42 // did you mean to learn the answer? if so go to line aslfjasdlfja; (gee willickers.... tbc)
return
}
email, err := getEmail(ctx)
if err != nil {
invalidData(w, err, h.Logger)
return
}
usr, err := h.UsersStore.FindByEmail(ctx, email)
if err == nil {
res := newUserResponse(usr)
encodeJSON(w, http.StatusOK, res, h.Logger)
return
}
// Because we didn't find a user, make a new one
user := &chronograf.User{
Email: email,
}
user, err = h.UsersStore.Add(ctx, user)
if err != nil {
msg := fmt.Errorf("error storing user %v: %v", user, err)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
res := newUserResponse(user)
encodeJSON(w, http.StatusOK, res, h.Logger)
}
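Both branches of Me answer with the shape produced by newUserResponse — a small sketch; the email value is illustrative:

usr := &chronograf.User{ID: 1, Email: "howdy@example.com"}
res := newUserResponse(usr)
// res.Links.Self         == "/chronograf/v1/users/1"
// res.Links.Explorations == "/chronograf/v1/users/1/explorations"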

View File

@ -1,68 +0,0 @@
import PermissionsTable from 'src/shared/components/PermissionsTable';
import React from 'react';
import {shallow} from 'enzyme';
import sinon from 'sinon';
describe('Shared.Components.PermissionsTable', function() {
it('renders a row for each permission', function() {
const permissions = [
{name: 'ViewChronograf', displayName: 'View Chronograf', description: 'Can use Chronograf tools', resources: ['db1']},
{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']},
];
const wrapper = shallow(
<PermissionsTable
permissions={permissions}
showAddResource={true}
onRemovePermission={sinon.spy()}
/>
);
expect(wrapper.find('tr').length).to.equal(2);
expect(wrapper.find('table').text()).to.match(/View Chronograf/);
expect(wrapper.find('table').text()).to.match(/db1/);
expect(wrapper.find('table').text()).to.match(/Read/);
expect(wrapper.find('table').text()).to.match(/All Databases/);
});
it('only renders the control to add a resource when specified', function() {
const wrapper = shallow(
<PermissionsTable
permissions={[{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']}]}
showAddResource={false}
onRemovePermission={sinon.spy()}
/>
);
expect(wrapper.find('.pill-add').length).to.equal(0);
});
it('only renders the "Remove" control when a callback is provided', function() {
const wrapper = shallow(
<PermissionsTable
permissions={[{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']}]}
showAddResource={true}
/>
);
expect(wrapper.find('.remove-permission').length).to.equal(0);
});
describe('when a user clicks "Remove"', function() {
it('fires a callback', function() {
const permission = {name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']};
const cb = sinon.spy();
const wrapper = shallow(
<PermissionsTable
permissions={[permission]}
showAddResource={false}
onRemovePermission={cb}
/>
);
wrapper.find('button[children="Remove"]').at(0).simulate('click');
expect(cb.calledWith(permission)).to.be.true;
});
});
});

View File

@ -0,0 +1,88 @@
import timeSeriesToDygraph from 'src/utils/timeSeriesToDygraph';
describe('timeSeriesToDygraph', () => {
it('parses a raw InfluxDB response into a dygraph friendly data format', () => {
const influxResponse = [
{
"response":
{
"results": [
{
"series": [
{
"name":"m1",
"columns": ["time","f1"],
"values": [[1000, 1],[2000, 2]],
},
]
},
{
"series": [
{
"name":"m1",
"columns": ["time","f2"],
"values": [[2000, 3],[4000, 4]],
},
]
},
],
},
}
];
const actual = timeSeriesToDygraph(influxResponse);
const expected = {
fields: [
'time',
`m1.f1`,
`m1.f2`,
],
timeSeries: [
[new Date(1000), 1, null],
[new Date(2000), 2, 3],
[new Date(4000), null, 4],
],
};
expect(actual).to.deep.equal(expected);
});
it('can sort numerical timestamps correctly', () => {
const influxResponse = [
{
"response":
{
"results": [
{
"series": [
{
"name":"m1",
"columns": ["time","f1"],
"values": [[100, 1],[3000, 3],[200, 2]],
},
]
},
],
},
}
];
const actual = timeSeriesToDygraph(influxResponse);
const expected = {
fields: [
'time',
'm1.f1',
],
timeSeries: [
[new Date(100), 1],
[new Date(200), 2],
[new Date(3000), 3],
],
};
expect(actual).to.deep.equal(expected);
});
});
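Taken together, the two specs pin down the transform's contract: one column per measurement.field, rows keyed by timestamp with gaps null-filled, and output sorted by time. A hypothetical sketch of such a merge (assumed shape only, not the shipped implementation):

// Illustrative only: merge InfluxDB series into one dygraph-friendly table.
function timeSeriesToDygraphSketch(raw) {
  const fields = ['time'];
  const rows = {}; // timestamp -> values aligned with fields[1..]
  raw.forEach(({response}) => {
    response.results.forEach((result) => {
      (result.series || []).forEach(({name, columns, values}) => {
        // One column per measurement.field, e.g. "m1.f1".
        const col = fields.push(`${name}.${columns[1]}`) - 2;
        values.forEach(([time, value]) => {
          rows[time] = rows[time] || [];
          rows[time][col] = value;
        });
      });
    });
  });
  // Sort numerically by timestamp and null-fill missing columns.
  const timeSeries = Object.keys(rows).map(Number).sort((a, b) => a - b).map((t) =>
    [new Date(t)].concat(fields.slice(1).map((_, i) => rows[t][i] === undefined ? null : rows[t][i])));
  return {fields, timeSeries};
}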

View File

@ -53,7 +53,7 @@ const CheckSources = React.createClass({
const {isFetching, sources} = nextState;
const source = sources.find((s) => s.id === params.sourceID);
if (!isFetching && !source) {
-return router.push(`/?redirectPath=${location.pathname}`);
+return router.push(`/sources/new?redirectPath=${location.pathname}`);
}
if (!isFetching && !location.pathname.includes("/manage-sources")) {

View File

@ -1,9 +1,9 @@
import React, {PropTypes} from 'react';
-import {Link} from 'react-router';
import AlertsTable from '../components/AlertsTable';
import {getAlerts} from '../apis';
import AJAX from 'utils/ajax';
import _ from 'lodash';
+import NoKapacitorError from '../../shared/components/NoKapacitorError';
// Kevin: because we were getting strange errors saying
// "Failed prop type: Required prop `source` was not specified in `AlertsApp`."
@ -83,16 +83,10 @@ const AlertsApp = React.createClass({
const {source} = this.props;
if (this.state.hasKapacitor) {
component = (
-<AlertsTable source={this.props.source} alerts={this.state.alerts} />
+<AlertsTable source={source} alerts={this.state.alerts} />
);
} else {
-const path = `/sources/${source.id}/kapacitor-config`;
-component = (
-<div>
-<p>The current source does not have an associated Kapacitor instance, please configure one.</p>
-<Link to={path}>Add Kapacitor</Link>
-</div>
-);
+component = <NoKapacitorError source={source} />;
}
}
return component;

ui/src/auth/Login.js Normal file
View File

@ -0,0 +1,12 @@
import React from 'react';
import {withRouter} from 'react-router';
const Login = React.createClass({
render() {
return (
<a className="btn btn-primary" href="/oauth/github">Click me to log in</a>
);
},
});
export default withRouter(Login);

ui/src/auth/index.js Normal file
View File

@ -0,0 +1,2 @@
import Login from './Login';
export {Login};

View File

@ -2,11 +2,11 @@ import {proxy} from 'utils/queryUrlGenerator';
import AJAX from 'utils/ajax';
import _ from 'lodash';
-export function getCpuAndLoadForHosts(proxyLink) {
+export function getCpuAndLoadForHosts(proxyLink, telegrafDB) {
return proxy({
source: proxyLink,
-query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "telegraf".."system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
+query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
-db: 'telegraf',
+db: telegrafDB,
}).then((resp) => {
const hosts = {};
const precision = 100;
@ -51,13 +51,13 @@ export function getMappings() {
});
}
-export function getAppsForHosts(proxyLink, hosts, appMappings) {
+export function getAppsForHosts(proxyLink, hosts, appMappings, telegrafDB) {
const measurements = appMappings.map((m) => `^${m.measurement}$`).join('|');
const measurementsToApps = _.zipObject(appMappings.map(m => m.measurement), appMappings.map(m => m.name));
return proxy({
source: proxyLink,
query: `show series from /${measurements}/`,
-db: 'telegraf',
+db: telegrafDB,
}).then((resp) => {
const newHosts = Object.assign({}, hosts);
const allSeries = _.get(resp, ['data', 'results', '0', 'series', '0', 'values'], []);
@ -81,3 +81,28 @@ export function getAppsForHosts(proxyLink, hosts, appMappings) {
return newHosts;
});
}
export function getMeasurementsForHost(source, host) {
return proxy({
source: source.links.proxy,
query: `SHOW MEASUREMENTS WHERE "host" = '${host}'`,
db: source.telegraf,
}).then(({data}) => {
if (_isEmpty(data) || _hasError(data)) {
return [];
}
const series = data.results[0].series[0];
return series.values.map((measurement) => {
return measurement[0];
});
});
}
function _isEmpty(resp) {
return !resp.results[0].series;
}
function _hasError(resp) {
return !!resp.results[0].error;
}
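A hedged usage sketch for the new helper; the source object shape follows the propTypes added elsewhere in this diff, and the proxy path shown is illustrative:

// Hypothetical call, not part of this commit.
const source = {links: {proxy: '/chronograf/v1/sources/1/proxy'}, telegraf: 'telegraf'};
getMeasurementsForHost(source, 'my-host').then((measurements) => {
  console.log(measurements); // e.g. ['cpu', 'system', ...]
});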

View File

@ -2,7 +2,7 @@ import React, {PropTypes} from 'react';
import LayoutRenderer from 'shared/components/LayoutRenderer';
import TimeRangeDropdown from '../../shared/components/TimeRangeDropdown';
import timeRanges from 'hson!../../shared/data/timeRanges.hson';
-import {getMappings, getAppsForHosts} from '../apis';
+import {getMappings, getAppsForHosts, getMeasurementsForHost} from 'src/hosts/apis';
import {fetchLayouts} from 'shared/apis';
export const HostPage = React.createClass({
@ -11,6 +11,7 @@ export const HostPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
+telegraf: PropTypes.string.isRequired,
}),
params: PropTypes.shape({
hostID: PropTypes.string.isRequired,
@ -32,24 +33,28 @@ export const HostPage = React.createClass({
},
componentDidMount() {
-const hosts = {[this.props.params.hostID]: {name: this.props.params.hostID}};
+const {source, params} = this.props;
+const hosts = {[params.hostID]: {name: params.hostID}};
// fetching layouts and mappings can be done at the same time
fetchLayouts().then(({data: {layouts}}) => {
getMappings().then(({data: {mappings}}) => {
-getAppsForHosts(this.props.source.links.proxy, hosts, mappings).then((newHosts) => {
+getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => {
+getMeasurementsForHost(source, params.hostID).then((measurements) => {
const host = newHosts[this.props.params.hostID];
const filteredLayouts = layouts.filter((layout) => {
const focusedApp = this.props.location.query.app;
if (focusedApp) {
return layout.app === focusedApp;
}
-return host.apps && host.apps.includes(layout.app);
+return host.apps && host.apps.includes(layout.app) && measurements.includes(layout.measurement);
});
this.setState({layouts: filteredLayouts});
});
});
});
+});
},
handleChooseTimeRange({lower}) {
@ -60,7 +65,7 @@ export const HostPage = React.createClass({
renderLayouts(layouts) {
const autoRefreshMs = 15000;
const {timeRange} = this.state;
-const source = this.props.source.links.proxy;
+const {source} = this.props;
let layoutCells = [];
layouts.forEach((layout) => {
@ -70,7 +75,7 @@ export const HostPage = React.createClass({
layoutCells.forEach((cell, i) => {
cell.queries.forEach((q) => {
q.text = q.query;
-q.database = q.db;
+q.database = source.telegraf;
});
cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers
cell.y = 0;
@ -81,7 +86,7 @@ export const HostPage = React.createClass({
timeRange={timeRange}
cells={layoutCells}
autoRefreshMs={autoRefreshMs}
-source={source}
+source={source.links.proxy}
host={this.props.params.hostID}
/>
);

View File

@ -12,6 +12,7 @@ export const HostsPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
+telegraf: PropTypes.string.isRequired,
}),
addFlashMessage: PropTypes.func,
},
@ -25,11 +26,11 @@ export const HostsPage = React.createClass({
componentDidMount() {
const {source, addFlashMessage} = this.props;
Promise.all([
-getCpuAndLoadForHosts(source.links.proxy),
+getCpuAndLoadForHosts(source.links.proxy, source.telegraf),
getMappings(),
]).then(([hosts, {data: {mappings}}]) => {
this.setState({hosts});
-getAppsForHosts(source.links.proxy, hosts, mappings).then((newHosts) => {
+getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => {
this.setState({hosts: newHosts});
}).catch(() => {
addFlashMessage({type: 'error', text: 'Unable to get apps for hosts'});

View File

@ -1,25 +1,24 @@
-import React, {PropTypes} from 'react';
+import React from 'react';
import {render} from 'react-dom';
import {Provider} from 'react-redux';
-import {Router, Route, browserHistory} from 'react-router';
+import {Router, Route, browserHistory, Redirect} from 'react-router';
import App from 'src/App';
import AlertsApp from 'src/alerts';
import CheckSources from 'src/CheckSources';
import {HostsPage, HostPage} from 'src/hosts';
import {KubernetesPage} from 'src/kubernetes';
+import {Login} from 'src/auth';
import {KapacitorPage, KapacitorRulePage, KapacitorRulesPage, KapacitorTasksPage} from 'src/kapacitor';
import DataExplorer from 'src/chronograf';
import {CreateSource, SourceForm, ManageSources} from 'src/sources';
import NotFound from 'src/shared/components/NotFound';
-import NoClusterError from 'src/shared/components/NoClusterError';
import configureStore from 'src/store/configureStore';
-import {getSources} from 'shared/apis';
+import {getMe, getSources} from 'shared/apis';
+import {receiveMe} from 'shared/actions/me';
import 'src/style/enterprise_style/application.scss';
-const {number, shape, string, bool} = PropTypes;
const defaultTimeRange = {upper: null, lower: 'now() - 15m'};
const lsTimeRange = window.localStorage.getItem('timeRange');
const parsedTimeRange = JSON.parse(lsTimeRange) || {};
@ -28,38 +27,15 @@ const timeRange = Object.assign(defaultTimeRange, parsedTimeRange);
const store = configureStore({timeRange});
const rootNode = document.getElementById('react-root');
-const HTTP_SERVER_ERROR = 500;
const Root = React.createClass({
getInitialState() {
return {
-me: {
-id: 1,
-name: 'Chronograf',
-email: 'foo@example.com',
-admin: true,
-},
-isFetching: false,
-hasReadPermission: false,
-clusterStatus: null,
+loggedIn: null,
};
},
-childContextTypes: {
-me: shape({
-id: number.isRequired,
-name: string.isRequired,
-email: string.isRequired,
-admin: bool.isRequired,
-}),
-},
-getChildContext() {
-return {
-me: this.state.me,
-};
-},
+componentDidMount() {
+this.checkAuth();
+},
activeSource(sources) {
const defaultSource = sources.find((s) => s.default);
if (defaultSource && defaultSource.id) {
@ -68,29 +44,53 @@ const Root = React.createClass({
return sources[0];
},
-redirectToHosts(_, replace, callback) {
+redirectFromRoot(_, replace, callback) {
getSources().then(({data: {sources}}) => {
if (sources && sources.length) {
const path = `/sources/${this.activeSource(sources).id}/hosts`;
replace(path);
}
callback();
-}).catch(callback);
+});
},
+checkAuth() {
+if (store.getState().me.links) {
+return this.setState({loggedIn: true});
+}
+getMe().then(({data: me}) => {
+store.dispatch(receiveMe(me));
+this.setState({loggedIn: true});
+}).catch((err) => {
+const AUTH_DISABLED = 418;
+if (err.response.status === AUTH_DISABLED) {
+return this.setState({loggedIn: true});
+// Could store a boolean indicating auth is not set up
+}
+this.setState({loggedIn: false});
+});
+},
render() {
-if (this.state.isFetching) {
-return null;
+if (this.state.loggedIn === null) {
+return <div className="page-spinner"></div>;
}
-if (this.state.clusterStatus === HTTP_SERVER_ERROR) {
-return <NoClusterError />;
-}
+if (this.state.loggedIn === false) {
+return (
+<Provider store={store}>
+<Router history={browserHistory}>
+<Route path="/login" component={Login} />
+<Redirect from="*" to="/login" />
+</Router>
+</Provider>
+);
+}
return (
<Provider store={store}>
<Router history={browserHistory}>
-<Route path="/" component={CreateSource} onEnter={this.redirectToHosts} />
+<Route path="/" component={CreateSource} onEnter={this.redirectFromRoot} />
+<Route path="/sources/new" component={CreateSource} />
<Route path="/sources/:sourceID" component={App}>
<Route component={CheckSources}>
<Route path="manage-sources" component={ManageSources} />

View File

@ -43,9 +43,8 @@ export function loadDefaultRule() {
};
}
-export function fetchRules(source) {
+export function fetchRules(kapacitor) {
return (dispatch) => {
-getKapacitor(source).then((kapacitor) => {
getRules(kapacitor).then(({data: {rules}}) => {
dispatch({
type: 'LOAD_RULES',
@ -54,7 +53,6 @@ export function fetchRules(source) {
},
});
});
-});
};
}
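fetchRules now takes an already-resolved kapacitor rather than a source, so callers perform the lookup first, as KapacitorRulesPage does below. A condensed sketch of the new calling convention (dispatch wiring assumed):

// Hypothetical caller mirroring the componentDidMount change below.
getKapacitor(source).then((kapacitor) => {
  if (kapacitor) {
    dispatch(fetchRules(kapacitor));
  }
});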

View File

@ -2,11 +2,14 @@ import React, {PropTypes} from 'react';
import {connect} from 'react-redux';
import {bindActionCreators} from 'redux';
import {Link} from 'react-router';
-import * as kapacitorActionCreators from 'src/kapacitor/actions/view';
+import {getKapacitor} from 'src/shared/apis';
+import * as kapacitorActionCreators from '../actions/view';
+import NoKapacitorError from '../../shared/components/NoKapacitorError';
export const KapacitorRulesPage = React.createClass({
propTypes: {
source: PropTypes.shape({
+id: PropTypes.string.isRequired,
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
self: PropTypes.string.isRequired,
@ -26,8 +29,20 @@ export const KapacitorRulesPage = React.createClass({
addFlashMessage: PropTypes.func,
},
+getInitialState() {
+return {
+hasKapacitor: false,
+loading: true,
+};
+},
componentDidMount() {
-this.props.actions.fetchRules(this.props.source);
+getKapacitor(this.props.source).then((kapacitor) => {
+if (kapacitor) {
+this.props.actions.fetchRules(kapacitor);
+}
+this.setState({loading: false, hasKapacitor: !!kapacitor});
+});
},
handleDeleteRule(rule) {
@ -35,20 +50,15 @@ export const KapacitorRulesPage = React.createClass({
actions.deleteRule(rule);
},
-render() {
+renderSubComponent() {
const {source} = this.props;
+const {hasKapacitor, loading} = this.state;
-return (
-<div className="kapacitor-rules-page">
-<div className="chronograf-header">
-<div className="chronograf-header__container">
-<div className="chronograf-header__left">
-<h1>Kapacitor Rules</h1>
-</div>
-</div>
-</div>
-<div className="hosts-page-scroll-container">
-<div className="container-fluid">
+let component;
+if (loading) {
+component = (<p>Loading...</p>);
+} else if (hasKapacitor) {
+component = (
<div className="panel panel-minimal">
<div className="panel-heading u-flex u-ai-center u-jc-space-between">
<h2 className="panel-title">Alert Rules</h2>
@ -71,6 +81,26 @@ export const KapacitorRulesPage = React.createClass({
</table>
</div>
</div>
+);
+} else {
+component = <NoKapacitorError source={source} />;
+}
+return component;
+},
+render() {
+return (
+<div className="kapacitor-rules-page">
+<div className="chronograf-header">
+<div className="chronograf-header__container">
+<div className="chronograf-header__left">
+<h1>Kapacitor Rules</h1>
+</div>
+</div>
+</div>
+<div className="hosts-page-scroll-container">
+<div className="container-fluid">
+{this.renderSubComponent()}
</div>
</div>
</div>

View File

@ -9,6 +9,7 @@ export const KubernetesPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
+telegraf: PropTypes.string.isRequired,
}),
layouts: PropTypes.arrayOf(PropTypes.shape().isRequired).isRequired,
},
@ -23,7 +24,7 @@ export const KubernetesPage = React.createClass({
renderLayouts(layouts) {
const autoRefreshMs = 15000;
const {timeRange} = this.state;
-const source = this.props.source.links.proxy;
+const {source} = this.props;
let layoutCells = [];
layouts.forEach((layout) => {
@ -33,7 +34,7 @@ export const KubernetesPage = React.createClass({
layoutCells.forEach((cell, i) => {
cell.queries.forEach((q) => {
q.text = q.query;
-q.database = q.db;
+q.database = source.telegraf;
});
cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers
cell.y = 0;
@ -44,7 +45,7 @@ export const KubernetesPage = React.createClass({
timeRange={timeRange}
cells={layoutCells}
autoRefreshMs={autoRefreshMs}
-source={source}
+source={source.links.proxy}
/>
);
},
@ -66,12 +67,12 @@ export const KubernetesPage = React.createClass({
return (
<div className="host-dashboard hosts-page">
-<div className="enterprise-header hosts-dashboard-header">
-<div className="enterprise-header__container">
-<div className="enterprise-header__left">
+<div className="chronograf-header hosts-dashboard-header">
+<div className="chronograf-header__container">
+<div className="chronograf-header__left">
<h1>Kubernetes Dashboard</h1>
</div>
-<div className="enterprise-header__right">
+<div className="chronograf-header__right">
<h1>Range:</h1>
<TimeRangeDropdown onChooseTimeRange={this.handleChooseTimeRange} selected={timeRange.inputValue} />
</div>

View File

@ -0,0 +1,14 @@
export function receiveMe(me) {
return {
type: 'ME_RECEIVED',
payload: {
me,
},
};
}
export function logout() {
return {
type: 'LOGOUT',
};
}

View File

@ -7,6 +7,13 @@ export function fetchLayouts() {
});
}
+export function getMe() {
+return AJAX({
+url: `/chronograf/v1/me`,
+method: 'GET',
+});
+}
export function getSources() {
return AJAX({
url: '/chronograf/v1/sources',

View File

@ -1,79 +0,0 @@
import React, {PropTypes} from 'react';
const {arrayOf, number, shape, func, string} = PropTypes;
const AddClusterAccounts = React.createClass({
propTypes: {
clusters: arrayOf(shape({
id: number.isRequired,
cluster_users: arrayOf(shape({
name: string.isRequired,
})),
dipslay_name: string,
cluster_id: string.isRequired,
})).isRequired,
onSelectClusterAccount: func.isRequired,
headerText: string,
},
getDefaultProps() {
return {
headerText: 'Pair With Cluster Accounts',
};
},
handleSelectClusterAccount(e, clusterID) {
this.props.onSelectClusterAccount({
clusterID,
accountName: e.target.value,
});
},
render() {
return (
<div>
{
this.props.clusters.map((cluster, i) => {
return (
<div key={i} className="form-grid">
<div className="form-group col-sm-6">
{i === 0 ? <label>Cluster</label> : null}
<div className="form-control-static">
{cluster.display_name || cluster.cluster_id}
</div>
</div>
<div className="form-group col-sm-6">
{i === 0 ? <label>Account</label> : null}
{this.renderClusterUsers(cluster)}
</div>
</div>
);
})
}
</div>
);
},
renderClusterUsers(cluster) {
if (!cluster.cluster_users) {
return (
<select disabled={true} defaultValue="No cluster accounts" className="form-control" id="cluster-account">
<option>No cluster accounts</option>
</select>
);
}
return (
<select onChange={(e) => this.handleSelectClusterAccount(e, cluster.cluster_id)} className="form-control">
<option value="">No Association</option>
{
cluster.cluster_users.map((cu) => {
return <option value={cu.name} key={cu.name}>{cu.name}</option>;
})
}
</select>
);
},
});
export default AddClusterAccounts;

View File

@ -1,124 +0,0 @@
import React, {PropTypes} from 'react';
const CLUSTER_WIDE_PERMISSIONS = ["CreateDatabase", "AddRemoveNode", "ManageShard", "DropDatabase", "CopyShard", "Rebalance"];
const AddPermissionModal = React.createClass({
propTypes: {
activeCluster: PropTypes.string.isRequired,
permissions: PropTypes.arrayOf(PropTypes.shape({
name: PropTypes.string.isRequired,
displayName: PropTypes.string.isRequired,
description: PropTypes.string.isRequired,
})),
databases: PropTypes.arrayOf(PropTypes.string.isRequired).isRequired,
onAddPermission: PropTypes.func.isRequired,
},
getInitialState() {
return {
selectedPermission: null,
selectedDatabase: '',
};
},
handlePermissionClick(permission) {
this.setState({
selectedPermission: permission,
selectedDatabase: '',
});
},
handleDatabaseChange(e) {
this.setState({selectedDatabase: e.target.value});
},
handleSubmit(e) {
e.preventDefault();
this.props.onAddPermission({
name: this.state.selectedPermission,
resources: [this.state.selectedDatabase],
});
$('#addPermissionModal').modal('hide'); // eslint-disable-line no-undef
},
render() {
const {permissions} = this.props;
return (
<div className="modal fade" id="addPermissionModal" tabIndex="-1" role="dialog">
<div className="modal-dialog">
<div className="modal-content">
<div className="modal-header">
<button type="button" className="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
<h4 className="modal-title">Select a Permission to Add</h4>
</div>
<form onSubmit={this.handleSubmit}>
<div className="modal-body">
<div className="well permission-list">
<ul>
{permissions.map((perm) => {
return (
<li key={perm.name}>
<input onClick={() => this.handlePermissionClick(perm.name)} type="radio" name="permissionName" value={`${perm.name}`} id={`permission-${perm.name}`}></input>
<label htmlFor={`permission-${perm.name}`}>
{perm.displayName}
<br/>
<span className="permission-description">{perm.description}</span>
</label>
</li>
);
})}
</ul>
</div>
{this.renderOptions()}
</div>
{this.renderFooter()}
</form>
</div>
</div>
</div>
);
},
renderFooter() {
return (
<div className="modal-footer">
<button className="btn btn-default" data-dismiss="modal">Cancel</button>
<input disabled={!this.state.selectedPermission} className="btn btn-success" type="submit" value="Add Permission"></input>
</div>
);
},
renderOptions() {
return (
<div>
{this.state.selectedPermission ? this.renderDatabases() : null}
</div>
);
},
renderDatabases() {
const isClusterWide = CLUSTER_WIDE_PERMISSIONS.includes(this.state.selectedPermission);
if (!this.props.databases.length || isClusterWide) {
return null;
}
return (
<div>
<div className="form-grid">
<div className="form-group col-md-12">
<label htmlFor="#permissions-database">Limit Permission to...</label>
<select onChange={this.handleDatabaseChange} className="form-control" name="database" id="permissions-database">
<option value={''}>All Databases</option>
{this.props.databases.map((databaseName, i) => <option key={i}>{databaseName}</option>)}
</select>
</div>
</div>
</div>
);
},
});
export default AddPermissionModal;

View File

@ -60,7 +60,7 @@ export default function AutoRefresh(ComposedComponent) {
const newSeries = [];
queries.forEach(({host, database, rp, text}) => {
_fetchTimeSeries(host, database, rp, text).then((resp) => {
-newSeries.push({identifier: host, response: resp.data});
+newSeries.push({response: resp.data});
count += 1;
if (count === queries.length) {
this.setState({

View File

@ -1,24 +0,0 @@
import React from 'react';
const {node} = React.PropTypes;
const ClusterError = React.createClass({
propTypes: {
children: node.isRequired,
},
render() {
return (
<div className="container-fluid">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
{this.props.children}
</div>
</div>
</div>
</div>
);
},
});
export default ClusterError;

View File

@ -131,6 +131,7 @@ export default React.createClass({
const timeSeries = this.getTimeSeries();
const {fields, yRange} = this.props;
dygraph.updateOptions({
+labels: fields,
file: timeSeries,

View File

@ -1,21 +0,0 @@
import React from 'react';
import ClusterError from './ClusterError';
const InsufficientPermissions = React.createClass({
render() {
return (
<ClusterError>
<div className="panel-heading text-center">
<h2 className="deluxe">
{`Your account has insufficient permissions`}
</h2>
</div>
<div className="panel-body text-center">
<h3 className="deluxe">Talk to your admin to get additional permissions for access</h3>
</div>
</ClusterError>
);
},
});
export default InsufficientPermissions;

View File

@ -101,7 +101,7 @@ export const LayoutRenderer = React.createClass({
render() {
const layoutMargin = 4;
return (
-<GridLayout layout={this.state.layout} isDraggable={false} isResizable={false} cols={12} rowHeight={83.5} margin={[layoutMargin, layoutMargin]} containerPadding={[0, 0]}>
+<GridLayout layout={this.state.layout} isDraggable={false} isResizable={false} cols={12} rowHeight={83.5} margin={[layoutMargin, layoutMargin]} containerPadding={[0, 0]} useCSSTransforms={false} >
{this.generateGraphs()}
</GridLayout>
);

View File

@ -1,35 +0,0 @@
import React from 'react';
import errorCopy from 'hson!shared/copy/errors.hson';
const NoClusterError = React.createClass({
render() {
return (
<div>
<div className="container">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
<div className="panel-heading text-center">
<h2 className="deluxe">
{errorCopy.noCluster.head}
</h2>
</div>
<div className="panel-body text-center">
<h3 className="deluxe">How to resolve:</h3>
<p>
{errorCopy.noCluster.body}
</p>
<div className="text-center">
<button className="btn btn-center btn-success" onClick={() => window.location.reload()}>My Cluster Is Back Up</button>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
);
},
});
export default NoClusterError;

View File

@ -1,27 +0,0 @@
import React from 'react';
const NoClusterLinksError = React.createClass({
render() {
return (
<div className="container-fluid">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
<div className="panel-heading text-center">
<h2 className="deluxe">
This user is not associated with any cluster accounts!
</h2>
</div>
<div className="panel-body text-center">
<p>Many features in Chronograf require your user to be associated with a cluster account.</p>
<p>Ask an administrator to associate your user with a cluster account.</p>
</div>
</div>
</div>
</div>
</div>
);
},
});
export default NoClusterLinksError;

View File

@ -0,0 +1,22 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
const NoKapacitorError = React.createClass({
propTypes: {
source: PropTypes.shape({
id: PropTypes.string.isRequired,
}).isRequired,
},
render() {
const path = `/sources/${this.props.source.id}/kapacitor-config`;
return (
<div>
<p>The current source does not have an associated Kapacitor instance, please configure one.</p>
<Link to={path}>Add Kapacitor</Link>
</div>
);
},
});
export default NoKapacitorError;

View File

@ -1,76 +0,0 @@
import React, {PropTypes} from 'react';
const {arrayOf, shape, string} = PropTypes;
const PermissionsTable = React.createClass({
propTypes: {
permissions: PropTypes.arrayOf(shape({
name: string.isRequired,
displayName: string.isRequired,
description: string.isRequired,
resources: arrayOf(string.isRequired).isRequired,
})).isRequired,
showAddResource: PropTypes.bool,
onRemovePermission: PropTypes.func,
},
getDefaultProps() {
return {
permissions: [],
showAddResource: false,
};
},
handleAddResourceClick() {
// TODO
},
handleRemovePermission(permission) {
this.props.onRemovePermission(permission);
},
render() {
if (!this.props.permissions.length) {
return (
<div className="generic-empty-state">
<span className="icon alert-triangle"></span>
<h4>This Role has no Permissions</h4>
</div>
);
}
return (
<div className="panel-body">
<table className="table permissions-table">
<tbody>
{this.props.permissions.map((p) => (
<tr key={p.name}>
<td>{p.displayName}</td>
<td>
{p.resources.map((resource, i) => <div key={i} className="pill">{resource === '' ? 'All Databases' : resource}</div>)}
{this.props.showAddResource ? (
<div onClick={this.handleAddResourceClick} className="pill-add" data-toggle="modal" data-target="#addPermissionModal">
<span className="icon plus"></span>
</div>
) : null}
</td>
{this.props.onRemovePermission ? (
<td className="remove-permission">
<button
onClick={() => this.handleRemovePermission(p)}
type="button"
className="btn btn-sm btn-link-danger">
Remove
</button>
</td>
) : null}
</tr>
))}
</tbody>
</table>
</div>
);
},
});
export default PermissionsTable;

View File

@ -1,86 +0,0 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
import PermissionsTable from 'src/shared/components/PermissionsTable';
const {arrayOf, bool, func, shape, string} = PropTypes;
const RolePanels = React.createClass({
propTypes: {
roles: arrayOf(shape({
name: string.isRequired,
users: arrayOf(string.isRequired).isRequired,
permissions: arrayOf(shape({
name: string.isRequired,
displayName: string.isRequired,
description: string.isRequired,
resources: arrayOf(string.isRequired).isRequired,
})).isRequired,
})).isRequired,
showUserCount: bool,
onRemoveAccountFromRole: func,
},
getDefaultProps() {
return {
showUserCount: false,
};
},
render() {
const {roles} = this.props;
if (!roles.length) {
return (
<div className="panel panel-default">
<div className="panel-body">
<div className="generic-empty-state">
<span className="icon alert-triangle"></span>
<h4>This user has no roles</h4>
</div>
</div>
</div>
);
}
return (
<div className="panel-group sub-page" role="tablist">
{roles.map((role) => {
const id = role.name.replace(/[^\w]/gi, '');
return (
<div key={role.name} className="panel panel-default">
<div className="panel-heading" role="tab" id={`heading${id}`}>
<h4 className="panel-title u-flex u-ai-center u-jc-space-between">
<a className="collapsed" role="button" data-toggle="collapse" href={`#collapse-role-${id}`}>
<span className="caret"></span>
{role.name}
</a>
<div>
{this.props.showUserCount ? <p>{role.users ? role.users.length : 0} Users</p> : null}
{this.props.onRemoveAccountFromRole ? (
<button
onClick={() => this.props.onRemoveAccountFromRole(role)}
data-toggle="modal"
data-target="#removeAccountFromRoleModal"
type="button"
className="btn btn-sm btn-link">
Remove
</button>
) : null}
<Link to={`/roles/${encodeURIComponent(role.name)}`} className="btn btn-xs btn-link">
Go To Role
</Link>
</div>
</h4>
</div>
<div id={`collapse-role-${id}`} className="panel-collapse collapse" role="tabpanel">
<PermissionsTable permissions={role.permissions} />
</div>
</div>
);
})}
</div>
);
},
});
export default RolePanels;

View File

@ -1,95 +0,0 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
import classNames from 'classnames';
const {func, shape, arrayOf, string} = PropTypes;
const UsersTable = React.createClass({
propTypes: {
users: arrayOf(shape({}).isRequired).isRequired,
activeCluster: string.isRequired,
onUserToDelete: func.isRequired,
me: shape({}).isRequired,
deleteText: string,
},
getDefaultProps() {
return {
deleteText: 'Delete',
};
},
handleSelectUserToDelete(user) {
this.props.onUserToDelete(user);
},
render() {
const {users, activeCluster, me} = this.props;
if (!users.length) {
return (
<div className="generic-empty-state">
<span className="icon user-outline"/>
<h4>No users</h4>
</div>
);
}
return (
<table className="table v-center users-table">
<tbody>
<tr>
<th></th>
<th>Name</th>
<th>Admin</th>
<th>Email</th>
<th></th>
</tr>
{
users.map((user) => {
const isMe = me.id === user.id;
return (
<tr key={user.id}>
<td></td>
<td>
<span>
<Link to={`/clusters/${activeCluster}/users/${user.id}`} title={`Go to ${user.name}'s profile`}>{user.name}</Link>
{isMe ? <em> (You) </em> : null}
</span>
</td>
<td className="admin-column">{this.renderAdminIcon(user.admin)}</td>
<td>{user.email}</td>
<td>
{this.renderDeleteButton(user)}
</td>
</tr>
);
})
}
</tbody>
</table>
);
},
renderAdminIcon(isAdmin) {
return <span className={classNames("icon", {"checkmark text-color-success": isAdmin, "remove text-color-danger": !isAdmin})}></span>;
},
renderDeleteButton(user) {
if (this.props.me.id === user.id) {
return <button type="button" className="btn btn-sm btn-link-danger disabled" title={`Cannot ${this.props.deleteText} Yourself`}>{this.props.deleteText}</button>;
}
return (
<button
onClick={() => this.handleSelectUserToDelete({id: user.id, name: user.name})}
type="button"
data-toggle="modal"
data-target="#deleteUsersModal"
className="btn btn-sm btn-link-danger"
>
{this.props.deleteText}
</button>
);
},
});
export default UsersTable;

View File

@ -0,0 +1,7 @@
import me from './me';
import notifications from './notifications';
export {
me,
notifications,
};

View File

@ -0,0 +1,17 @@
function getInitialState() {
return {};
}
const initialState = getInitialState();
export default function me(state = initialState, action) {
switch (action.type) {
case 'ME_RECEIVED': {
return action.payload.me;
}
case 'LOGOUT': {
return {};
}
}
return state;
}
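A minimal walkthrough of how this reducer composes with the receiveMe/logout action creators from ui/src/shared/actions/me.js (store wiring as configured in ui/src/index.js):

// Hypothetical REPL-style walkthrough, not part of this commit.
store.dispatch(receiveMe({email: 'user@example.com'}));
store.getState().me; // => {email: 'user@example.com'}
store.dispatch(logout());
store.getState().me; // => {}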

View File

@ -1,20 +1,25 @@
import React, {PropTypes} from 'react';
import {NavBar, NavBlock, NavHeader, NavListItem} from 'src/side_nav/components/NavItems';
-const {string} = PropTypes;
+const {string, shape} = PropTypes;
const SideNav = React.createClass({
propTypes: {
location: string.isRequired,
sourceID: string.isRequired,
explorationID: string,
+me: shape({
+email: string.isRequired,
+}),
},
render() {
-const {location, sourceID, explorationID} = this.props;
+const {me, location, sourceID, explorationID} = this.props;
const sourcePrefix = `/sources/${sourceID}`;
const explorationSuffix = explorationID ? `/${explorationID}` : '';
const dataExplorerLink = `${sourcePrefix}/chronograf/data-explorer${explorationSuffix}`;
+const loggedIn = !!(me && me.email);
return (
<NavBar location={location}>
<div className="sidebar__logo">
@ -39,6 +44,11 @@ const SideNav = React.createClass({
<NavListItem link={`${sourcePrefix}/manage-sources`}>InfluxDB</NavListItem>
<NavListItem link={`${sourcePrefix}/kapacitor-config`}>Kapacitor</NavListItem>
</NavBlock>
+{loggedIn ? (
+<NavBlock icon="user-outline" className="sidebar__square-last">
+<a className="sidebar__menu-item" href="/oauth/logout">Logout</a>
+</NavBlock>
+) : null}
</NavBar>
);
},

View File

@ -1,41 +1,38 @@
import React, {PropTypes} from 'react';
+import {connect} from 'react-redux';
import SideNav from '../components/SideNav';
-const {func, string} = PropTypes;
+const {func, string, shape} = PropTypes;
const SideNavApp = React.createClass({
propTypes: {
currentLocation: string.isRequired,
addFlashMessage: func.isRequired,
sourceID: string.isRequired,
explorationID: string,
+me: shape({
+email: string.isRequired,
+}),
},
-contextTypes: {
-canViewChronograf: PropTypes.bool,
-},
-getInitialState() {
-return {
-clusters: [],
-clusterToUpdate: '',
-};
-},
render() {
-const {currentLocation, sourceID, explorationID} = this.props;
+const {me, currentLocation, sourceID, explorationID} = this.props;
-const {canViewChronograf} = this.context;
return (
<SideNav
sourceID={sourceID}
-isAdmin={true}
-canViewChronograf={canViewChronograf}
location={currentLocation}
explorationID={explorationID}
+me={me}
/>
);
},
});
-export default SideNavApp;
+function mapStateToProps(state) {
+return {
+me: state.me,
+};
+}
+export default connect(mapStateToProps)(SideNavApp);

View File

@ -22,6 +22,7 @@ export const CreateSource = React.createClass({
username: this.sourceUser.value,
password: this.sourcePassword.value,
isDefault: true,
+telegraf: this.sourceTelegraf.value,
};
createSource(source).then(({data: sourceFromServer}) => {
this.redirectToApp(sourceFromServer);
@ -71,7 +72,10 @@ export const CreateSource = React.createClass({
<input ref={(r) => this.sourcePassword = r} className="form-control" id="password" type="password"></input>
</div>
</div>
+<div className="form-group col-xs-8 col-xs-offset-2">
+<label htmlFor="telegraf">Telegraf database</label>
+<input ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" type="text" value="telegraf"></input>
+</div>
<div className="form-group col-xs-12 text-center">
<button className="btn btn-success" type="submit">Create New Server</button>
</div>

View File

@ -44,6 +44,7 @@ export const SourceForm = React.createClass({
username: this.sourceUsername.value,
password: this.sourcePassword.value,
'default': this.sourceDefault.checked,
+telegraf: this.sourceTelegraf.value,
});
if (this.state.editMode) {
updateSource(newSource).then(() => {
@ -117,6 +118,10 @@ export const SourceForm = React.createClass({
<label htmlFor="password">Password</label>
<input type="password" name="password" ref={(r) => this.sourcePassword = r} className="form-control" id="password" onChange={this.onInputChange} value={source.password || ''}></input>
</div>
+<div className="form-group col-xs-8 col-xs-offset-2">
+<label htmlFor="telegraf">Telegraf database</label>
+<input type="text" name="telegraf" ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" onChange={this.onInputChange} value={source.telegraf || 'telegraf'}></input>
+</div>
<div className="form-group col-xs-8 col-xs-offset-2">
<div className="form-control-static">
<input type="checkbox" id="defaultSourceCheckbox" defaultChecked={source.default} ref={(r) => this.sourceDefault = r} />

Some files were not shown because too many files have changed in this diff Show More