Update from Master

pull/583/head
Alex P 2016-11-29 13:11:46 -08:00
commit 17743326eb
102 changed files with 2220 additions and 2136 deletions

View File

@ -1,5 +1,7 @@
## v1.1.0 [unreleased]
- #586: Allow telegraf database in non-default locations
- #576: Fix broken zoom on graphs that aren't the first.
- #575: Add Varnish Layout
- #574: Fix broken graphs on Postgres Layouts by adding aggregates.

View File

@ -19,6 +19,7 @@ type Client struct {
SourcesStore *SourcesStore
ServersStore *ServersStore
LayoutStore *LayoutStore
UsersStore *UsersStore
AlertsStore *AlertsStore
}
@ -28,6 +29,7 @@ func NewClient() *Client {
c.SourcesStore = &SourcesStore{client: c}
c.ServersStore = &ServersStore{client: c}
c.AlertsStore = &AlertsStore{client: c}
c.UsersStore = &UsersStore{client: c}
c.LayoutStore = &LayoutStore{
client: c,
IDs: &uuid.V4{},
@ -65,6 +67,10 @@ func (c *Client) Open() error {
if _, err := tx.CreateBucketIfNotExists(AlertsBucket); err != nil {
return err
}
// Always create Users bucket.
if _, err := tx.CreateBucketIfNotExists(UsersBucket); err != nil {
return err
}
return nil
}); err != nil {
return err

View File

@ -51,6 +51,7 @@ func MarshalSource(s chronograf.Source) ([]byte, error) {
Password: s.Password,
URL: s.URL,
Default: s.Default,
Telegraf: s.Telegraf,
})
}
@ -68,6 +69,7 @@ func UnmarshalSource(data []byte, s *chronograf.Source) error {
s.Password = pb.Password
s.URL = pb.URL
s.Default = pb.Default
s.Telegraf = pb.Telegraf
return nil
}
@ -203,3 +205,23 @@ func UnmarshalAlertRule(data []byte, r *ScopedAlert) error {
r.KapaID = int(pb.KapaID)
return nil
}
// MarshalUser serializes a chronograf user into its protobuf wire format.
func MarshalUser(u *chronograf.User) ([]byte, error) {
	pb := User{
		ID:    uint64(u.ID),
		Email: u.Email,
	}
	return proto.Marshal(&pb)
}
// UnmarshalUser deserializes protobuf wire data into the given chronograf user.
func UnmarshalUser(data []byte, u *chronograf.User) error {
	pb := &User{}
	if err := proto.Unmarshal(data, pb); err != nil {
		return err
	}
	u.Email = pb.Email
	u.ID = chronograf.UserID(pb.ID)
	return nil
}

View File

@ -16,6 +16,7 @@ It has these top-level messages:
Cell
Query
AlertRule
User
*/
package internal
@ -35,13 +36,13 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Exploration struct {
ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
UserID int64 `protobuf:"varint,3,opt,name=UserID,json=userID,proto3" json:"UserID,omitempty"`
Data string `protobuf:"bytes,4,opt,name=Data,json=data,proto3" json:"Data,omitempty"`
CreatedAt int64 `protobuf:"varint,5,opt,name=CreatedAt,json=createdAt,proto3" json:"CreatedAt,omitempty"`
UpdatedAt int64 `protobuf:"varint,6,opt,name=UpdatedAt,json=updatedAt,proto3" json:"UpdatedAt,omitempty"`
Default bool `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
UserID int64 `protobuf:"varint,3,opt,name=UserID,proto3" json:"UserID,omitempty"`
Data string `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
CreatedAt int64 `protobuf:"varint,5,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"`
UpdatedAt int64 `protobuf:"varint,6,opt,name=UpdatedAt,proto3" json:"UpdatedAt,omitempty"`
Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"`
}
func (m *Exploration) Reset() { *m = Exploration{} }
@ -50,13 +51,14 @@ func (*Exploration) ProtoMessage() {}
func (*Exploration) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} }
type Source struct {
ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
Type string `protobuf:"bytes,3,opt,name=Type,json=type,proto3" json:"Type,omitempty"`
Username string `protobuf:"bytes,4,opt,name=Username,json=username,proto3" json:"Username,omitempty"`
Password string `protobuf:"bytes,5,opt,name=Password,json=password,proto3" json:"Password,omitempty"`
URL string `protobuf:"bytes,6,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"`
Default bool `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
Type string `protobuf:"bytes,3,opt,name=Type,proto3" json:"Type,omitempty"`
Username string `protobuf:"bytes,4,opt,name=Username,proto3" json:"Username,omitempty"`
Password string `protobuf:"bytes,5,opt,name=Password,proto3" json:"Password,omitempty"`
URL string `protobuf:"bytes,6,opt,name=URL,proto3" json:"URL,omitempty"`
Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"`
Telegraf string `protobuf:"bytes,8,opt,name=Telegraf,proto3" json:"Telegraf,omitempty"`
}
func (m *Source) Reset() { *m = Source{} }
@ -65,12 +67,12 @@ func (*Source) ProtoMessage() {}
func (*Source) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} }
type Server struct {
ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"`
Username string `protobuf:"bytes,3,opt,name=Username,json=username,proto3" json:"Username,omitempty"`
Password string `protobuf:"bytes,4,opt,name=Password,json=password,proto3" json:"Password,omitempty"`
URL string `protobuf:"bytes,5,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"`
SrcID int64 `protobuf:"varint,6,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"`
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
Username string `protobuf:"bytes,3,opt,name=Username,proto3" json:"Username,omitempty"`
Password string `protobuf:"bytes,4,opt,name=Password,proto3" json:"Password,omitempty"`
URL string `protobuf:"bytes,5,opt,name=URL,proto3" json:"URL,omitempty"`
SrcID int64 `protobuf:"varint,6,opt,name=SrcID,proto3" json:"SrcID,omitempty"`
}
func (m *Server) Reset() { *m = Server{} }
@ -79,10 +81,10 @@ func (*Server) ProtoMessage() {}
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} }
type Layout struct {
ID string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
Application string `protobuf:"bytes,2,opt,name=Application,json=application,proto3" json:"Application,omitempty"`
Measurement string `protobuf:"bytes,3,opt,name=Measurement,json=measurement,proto3" json:"Measurement,omitempty"`
Cells []*Cell `protobuf:"bytes,4,rep,name=Cells,json=cells" json:"Cells,omitempty"`
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Application string `protobuf:"bytes,2,opt,name=Application,proto3" json:"Application,omitempty"`
Measurement string `protobuf:"bytes,3,opt,name=Measurement,proto3" json:"Measurement,omitempty"`
Cells []*Cell `protobuf:"bytes,4,rep,name=Cells" json:"Cells,omitempty"`
}
func (m *Layout) Reset() { *m = Layout{} }
@ -120,11 +122,11 @@ func (m *Cell) GetQueries() []*Query {
}
type Query struct {
Command string `protobuf:"bytes,1,opt,name=Command,json=command,proto3" json:"Command,omitempty"`
DB string `protobuf:"bytes,2,opt,name=DB,json=dB,proto3" json:"DB,omitempty"`
RP string `protobuf:"bytes,3,opt,name=RP,json=rP,proto3" json:"RP,omitempty"`
GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys,json=groupBys" json:"GroupBys,omitempty"`
Wheres []string `protobuf:"bytes,5,rep,name=Wheres,json=wheres" json:"Wheres,omitempty"`
Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"`
DB string `protobuf:"bytes,2,opt,name=DB,proto3" json:"DB,omitempty"`
RP string `protobuf:"bytes,3,opt,name=RP,proto3" json:"RP,omitempty"`
GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys" json:"GroupBys,omitempty"`
Wheres []string `protobuf:"bytes,5,rep,name=Wheres" json:"Wheres,omitempty"`
}
func (m *Query) Reset() { *m = Query{} }
@ -133,10 +135,10 @@ func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} }
type AlertRule struct {
ID string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"`
JSON string `protobuf:"bytes,2,opt,name=JSON,json=jSON,proto3" json:"JSON,omitempty"`
SrcID int64 `protobuf:"varint,3,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"`
KapaID int64 `protobuf:"varint,4,opt,name=KapaID,json=kapaID,proto3" json:"KapaID,omitempty"`
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
JSON string `protobuf:"bytes,2,opt,name=JSON,proto3" json:"JSON,omitempty"`
SrcID int64 `protobuf:"varint,3,opt,name=SrcID,proto3" json:"SrcID,omitempty"`
KapaID int64 `protobuf:"varint,4,opt,name=KapaID,proto3" json:"KapaID,omitempty"`
}
func (m *AlertRule) Reset() { *m = AlertRule{} }
@ -144,6 +146,16 @@ func (m *AlertRule) String() string { return proto.CompactTextString(
func (*AlertRule) ProtoMessage() {}
func (*AlertRule) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} }
// User is the protobuf representation of a chronograf user record
// as persisted in the bolt Users bucket.
type User struct {
	ID    uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
	Email string `protobuf:"bytes,2,opt,name=Email,proto3" json:"Email,omitempty"`
}

// Generated protobuf plumbing (do not edit by hand; regenerate from internal.proto).
func (m *User) Reset()                    { *m = User{} }
func (m *User) String() string            { return proto.CompactTextString(m) }
func (*User) ProtoMessage()               {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} }
func init() {
proto.RegisterType((*Exploration)(nil), "internal.Exploration")
proto.RegisterType((*Source)(nil), "internal.Source")
@ -152,44 +164,45 @@ func init() {
proto.RegisterType((*Cell)(nil), "internal.Cell")
proto.RegisterType((*Query)(nil), "internal.Query")
proto.RegisterType((*AlertRule)(nil), "internal.AlertRule")
proto.RegisterType((*User)(nil), "internal.User")
}
func init() { proto.RegisterFile("internal.proto", fileDescriptorInternal) }
var fileDescriptorInternal = []byte{
// 529 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x93, 0x4d, 0xae, 0xd3, 0x30,
0x10, 0x80, 0xe5, 0x26, 0xce, 0x8f, 0x8b, 0x0a, 0xb2, 0x10, 0x8a, 0x10, 0x8b, 0x2a, 0x62, 0x51,
0x36, 0x6f, 0x01, 0x27, 0x68, 0x1b, 0x84, 0x0a, 0xa5, 0xaf, 0xb8, 0x54, 0xac, 0x58, 0x98, 0xc4,
0xd0, 0x40, 0xfe, 0x70, 0x6c, 0xda, 0x6c, 0xd9, 0xc2, 0x31, 0xb8, 0x01, 0x17, 0x44, 0xe3, 0x3a,
0xa4, 0x12, 0xe8, 0xe9, 0x2d, 0xbf, 0x99, 0x49, 0xfc, 0xcd, 0x8c, 0x4d, 0x26, 0x79, 0xa5, 0x84,
0xac, 0x78, 0x71, 0xd5, 0xc8, 0x5a, 0xd5, 0x34, 0xe8, 0x39, 0xfe, 0x8d, 0xc8, 0xf8, 0xf9, 0xa9,
0x29, 0x6a, 0xc9, 0x55, 0x5e, 0x57, 0x74, 0x42, 0x46, 0xab, 0x24, 0x42, 0x53, 0x34, 0x73, 0xd8,
0x28, 0x4f, 0x28, 0x25, 0xee, 0x86, 0x97, 0x22, 0x1a, 0x4d, 0xd1, 0x2c, 0x64, 0x6e, 0xc5, 0x4b,
0x41, 0x1f, 0x10, 0x6f, 0xdf, 0x0a, 0xb9, 0x4a, 0x22, 0xc7, 0xd4, 0x79, 0xda, 0x10, 0xd4, 0x26,
0x5c, 0xf1, 0xc8, 0x3d, 0xd7, 0x66, 0x5c, 0x71, 0xfa, 0x88, 0x84, 0x4b, 0x29, 0xb8, 0x12, 0xd9,
0x5c, 0x45, 0xd8, 0x94, 0x87, 0x69, 0x1f, 0x80, 0xec, 0xbe, 0xc9, 0x6c, 0xd6, 0x3b, 0x67, 0x75,
0x1f, 0xa0, 0x11, 0xf1, 0x13, 0xf1, 0x91, 0xeb, 0x42, 0x45, 0xfe, 0x14, 0xcd, 0x02, 0xe6, 0x67,
0x67, 0x8c, 0x7f, 0x21, 0xe2, 0xed, 0x6a, 0x2d, 0x53, 0x71, 0x2b, 0x61, 0x4a, 0xdc, 0xb7, 0x5d,
0x23, 0x8c, 0x6e, 0xc8, 0x5c, 0xd5, 0x35, 0x82, 0x3e, 0x24, 0x01, 0x34, 0x01, 0x79, 0x2b, 0x1c,
0x68, 0xcb, 0x90, 0xdb, 0xf2, 0xb6, 0x3d, 0xd6, 0x32, 0x33, 0xce, 0x21, 0x0b, 0x1a, 0xcb, 0xf4,
0x1e, 0x71, 0xf6, 0x6c, 0x6d, 0x64, 0x43, 0xe6, 0x68, 0xb6, 0xbe, 0x41, 0xf3, 0x27, 0x68, 0x0a,
0xf9, 0x4d, 0xc8, 0x5b, 0x69, 0x5e, 0x2a, 0x39, 0x37, 0x28, 0xb9, 0xff, 0x57, 0xc2, 0x83, 0xd2,
0x7d, 0x82, 0x77, 0x32, 0x5d, 0x25, 0x76, 0xa6, 0xb8, 0x05, 0x88, 0xbf, 0x23, 0xe2, 0xad, 0x79,
0x57, 0x6b, 0x75, 0xa1, 0x13, 0x1a, 0x9d, 0x29, 0x19, 0xcf, 0x9b, 0xa6, 0xc8, 0x53, 0x73, 0x0b,
0xac, 0xd5, 0x98, 0x0f, 0x21, 0xa8, 0x78, 0x2d, 0x78, 0xab, 0xa5, 0x28, 0x45, 0xa5, 0xac, 0xdf,
0xb8, 0x1c, 0x42, 0xf4, 0x31, 0xc1, 0x4b, 0x51, 0x14, 0x6d, 0xe4, 0x4e, 0x9d, 0xd9, 0xf8, 0xe9,
0xe4, 0xea, 0xef, 0xa5, 0x83, 0x30, 0xc3, 0x29, 0x24, 0xe3, 0x1f, 0x88, 0xb8, 0xc0, 0xf4, 0x0e,
0x41, 0x27, 0x63, 0x80, 0x19, 0x3a, 0x01, 0x75, 0xe6, 0x58, 0xcc, 0x50, 0x07, 0x74, 0x34, 0x47,
0x60, 0x86, 0x8e, 0x40, 0x07, 0xd3, 0x34, 0x66, 0xe8, 0x40, 0x9f, 0x10, 0xff, 0xab, 0x16, 0x32,
0x17, 0x6d, 0x84, 0xcd, 0x41, 0x77, 0x87, 0x83, 0xde, 0x68, 0x21, 0x3b, 0xd6, 0xe7, 0xe1, 0xc3,
0xdc, 0x6e, 0x0a, 0xe5, 0x30, 0x72, 0x33, 0x5a, 0x7f, 0x18, 0x79, 0xac, 0x09, 0x36, 0xdf, 0xc0,
0x12, 0x97, 0x75, 0x59, 0xf2, 0x2a, 0xb3, 0x53, 0xf1, 0xd3, 0x33, 0xc2, 0xa8, 0x92, 0x85, 0x9d,
0xc8, 0x28, 0x5b, 0x00, 0xb3, 0xad, 0xed, 0x7f, 0x24, 0xb7, 0xb0, 0x99, 0x17, 0xb2, 0xd6, 0xcd,
0xa2, 0x3b, 0x77, 0x1e, 0xb2, 0xe0, 0x93, 0x65, 0x78, 0x29, 0xef, 0x0e, 0x42, 0x5a, 0xd5, 0x90,
0x79, 0x47, 0x43, 0xf1, 0x7b, 0x12, 0xce, 0x0b, 0x21, 0x15, 0xd3, 0x85, 0xf8, 0x67, 0x17, 0x94,
0xb8, 0x2f, 0x77, 0xd7, 0x9b, 0xfe, 0x6a, 0x7c, 0xde, 0x5d, 0x6f, 0x86, 0x85, 0x3a, 0x17, 0x0b,
0x85, 0xdf, 0xbf, 0xe2, 0x0d, 0x5f, 0x25, 0x66, 0x3a, 0x0e, 0xf3, 0xbe, 0x18, 0xfa, 0xe0, 0x99,
0x57, 0xfe, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x02, 0xe9, 0x30, 0xf7, 0x03, 0x00,
0x00,
// 541 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0x4b, 0x8e, 0xd3, 0x4c,
0x10, 0xc7, 0xd5, 0xb1, 0x3b, 0x89, 0x2b, 0x9f, 0xf2, 0xa1, 0xd6, 0x08, 0x59, 0x88, 0x45, 0x64,
0xb1, 0x08, 0x12, 0x9a, 0x05, 0x9c, 0x20, 0x89, 0x47, 0x28, 0x30, 0x0c, 0xa1, 0x33, 0x11, 0x2b,
0x16, 0x4d, 0x52, 0x43, 0x2c, 0x39, 0xb6, 0x69, 0xdb, 0x24, 0xde, 0xb2, 0x85, 0xdb, 0x70, 0x01,
0x8e, 0x86, 0xaa, 0xdd, 0x76, 0x2c, 0xf1, 0xd0, 0xec, 0xea, 0x5f, 0x55, 0xae, 0xfe, 0xd5, 0x23,
0x81, 0x71, 0x94, 0x14, 0xa8, 0x13, 0x15, 0x5f, 0x66, 0x3a, 0x2d, 0x52, 0x31, 0x6c, 0x74, 0xf0,
0x83, 0xc1, 0xe8, 0xea, 0x94, 0xc5, 0xa9, 0x56, 0x45, 0x94, 0x26, 0x62, 0x0c, 0xbd, 0x65, 0xe8,
0xb3, 0x09, 0x9b, 0x3a, 0xb2, 0xb7, 0x0c, 0x85, 0x00, 0xf7, 0x46, 0x1d, 0xd0, 0xef, 0x4d, 0xd8,
0xd4, 0x93, 0xc6, 0x16, 0x0f, 0xa1, 0xbf, 0xc9, 0x51, 0x2f, 0x43, 0xdf, 0x31, 0x79, 0x56, 0x51,
0x6e, 0xa8, 0x0a, 0xe5, 0xbb, 0x75, 0x2e, 0xd9, 0xe2, 0x31, 0x78, 0x0b, 0x8d, 0xaa, 0xc0, 0xdd,
0xac, 0xf0, 0xb9, 0x49, 0x3f, 0x3b, 0x28, 0xba, 0xc9, 0x76, 0x36, 0xda, 0xaf, 0xa3, 0xad, 0x43,
0xf8, 0x30, 0x08, 0xf1, 0x4e, 0x95, 0x71, 0xe1, 0x0f, 0x26, 0x6c, 0x3a, 0x94, 0x8d, 0x0c, 0x7e,
0x32, 0xe8, 0xaf, 0xd3, 0x52, 0x6f, 0xf1, 0x5e, 0xc0, 0x02, 0xdc, 0xdb, 0x2a, 0x43, 0x83, 0xeb,
0x49, 0x63, 0x8b, 0x47, 0x30, 0x24, 0xec, 0x84, 0x72, 0x6b, 0xe0, 0x56, 0x53, 0x6c, 0xa5, 0xf2,
0xfc, 0x98, 0xea, 0x9d, 0x61, 0xf6, 0x64, 0xab, 0xc5, 0x03, 0x70, 0x36, 0xf2, 0xda, 0xc0, 0x7a,
0x92, 0xcc, 0xbf, 0x63, 0x52, 0x9d, 0x5b, 0x8c, 0xf1, 0x93, 0x56, 0x77, 0xfe, 0xb0, 0xae, 0xd3,
0xe8, 0xe0, 0x3b, 0xb5, 0x80, 0xfa, 0x0b, 0xea, 0x7b, 0xb5, 0xd0, 0xc5, 0x75, 0xfe, 0x81, 0xeb,
0xfe, 0x19, 0x97, 0x9f, 0x71, 0x2f, 0x80, 0xaf, 0xf5, 0x76, 0x19, 0xda, 0x79, 0xd7, 0x22, 0xf8,
0xca, 0xa0, 0x7f, 0xad, 0xaa, 0xb4, 0x2c, 0x3a, 0x38, 0x9e, 0xc1, 0x99, 0xc0, 0x68, 0x96, 0x65,
0x71, 0xb4, 0x35, 0x17, 0x62, 0xa9, 0xba, 0x2e, 0xca, 0x78, 0x83, 0x2a, 0x2f, 0x35, 0x1e, 0x30,
0x29, 0x2c, 0x5f, 0xd7, 0x25, 0x9e, 0x00, 0x5f, 0x60, 0x1c, 0xe7, 0xbe, 0x3b, 0x71, 0xa6, 0xa3,
0xe7, 0xe3, 0xcb, 0xf6, 0x20, 0xc9, 0x2d, 0xeb, 0x60, 0xf0, 0x8d, 0x81, 0x4b, 0x96, 0xf8, 0x0f,
0xd8, 0xc9, 0x10, 0x70, 0xc9, 0x4e, 0xa4, 0x2a, 0xf3, 0x2c, 0x97, 0xac, 0x22, 0x75, 0x34, 0x4f,
0x70, 0xc9, 0x8e, 0xa4, 0xf6, 0xa6, 0x69, 0x2e, 0xd9, 0x5e, 0x3c, 0x85, 0xc1, 0xe7, 0x12, 0x75,
0x84, 0xb9, 0xcf, 0xcd, 0x43, 0xff, 0x9f, 0x1f, 0x7a, 0x57, 0xa2, 0xae, 0x64, 0x13, 0xa7, 0x0f,
0x23, 0xbb, 0x45, 0x16, 0xd1, 0xc8, 0xcd, 0x68, 0x07, 0xf5, 0xc8, 0xc9, 0x0e, 0x4a, 0xe0, 0xe6,
0x1b, 0x5a, 0xf0, 0x22, 0x3d, 0x1c, 0x54, 0xb2, 0xb3, 0x53, 0x69, 0x24, 0x8d, 0x2a, 0x9c, 0xdb,
0x89, 0xf4, 0xc2, 0x39, 0x69, 0xb9, 0xb2, 0xfd, 0xf7, 0xe4, 0x8a, 0x36, 0xf3, 0x52, 0xa7, 0x65,
0x36, 0xaf, 0xea, 0xce, 0x3d, 0xd9, 0x6a, 0xfa, 0x15, 0xbd, 0xdf, 0xa3, 0xb6, 0xa8, 0x9e, 0xb4,
0x2a, 0xf8, 0x00, 0xde, 0x2c, 0x46, 0x5d, 0xc8, 0x32, 0xc6, 0xdf, 0x76, 0x21, 0xc0, 0x7d, 0xb5,
0x7e, 0x7b, 0xd3, 0x9c, 0x06, 0xd9, 0xe7, 0x85, 0x3a, 0x9d, 0x85, 0x52, 0xf9, 0xd7, 0x2a, 0x53,
0xcb, 0xd0, 0x4c, 0xc7, 0x91, 0x56, 0x05, 0xcf, 0xc0, 0xa5, 0xc3, 0xe9, 0x54, 0x76, 0x4d, 0xe5,
0x0b, 0xe0, 0x57, 0x07, 0x15, 0xc5, 0xb6, 0x74, 0x2d, 0x3e, 0xf6, 0xcd, 0xff, 0xc5, 0x8b, 0x5f,
0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x2c, 0x30, 0x90, 0x41, 0x04, 0x00, 0x00,
}

View File

@ -2,32 +2,33 @@ syntax = "proto3";
package internal;
message Exploration {
int64 ID = 1; // ExplorationID is a unique ID for an Exploration.
string Name = 2; // User provided name of the Exploration.
int64 UserID = 3; // UserID is the owner of this Exploration.
string Data = 4; // Opaque blob of JSON data.
int64 CreatedAt = 5; // Time the exploration was first created.
int64 UpdatedAt = 6; // Latest time the exploration was updated.
bool Default = 7; // Flags an exploration as the default.
int64 ID = 1; // ExplorationID is a unique ID for an Exploration.
string Name = 2; // User provided name of the Exploration.
int64 UserID = 3; // UserID is the owner of this Exploration.
string Data = 4; // Opaque blob of JSON data.
int64 CreatedAt = 5; // Time the exploration was first created.
int64 UpdatedAt = 6; // Latest time the exploration was updated.
bool Default = 7; // Flags an exploration as the default.
}
message Source {
int64 ID = 1; // ID is the unique ID of the source
string Name = 2; // Name is the user-defined name for the source
string Type = 3; // Type specifies which kinds of source (enterprise vs oss)
string Username = 4; // Username is the username to connect to the source
string Password = 5;
string URL = 6; // URL are the connections to the source
bool Default = 7; // Flags an exploration as the default.
int64 ID = 1; // ID is the unique ID of the source
string Name = 2; // Name is the user-defined name for the source
string Type = 3; // Type specifies which kinds of source (enterprise vs oss)
string Username = 4; // Username is the username to connect to the source
string Password = 5;
string URL = 6; // URL are the connections to the source
bool Default = 7; // Flags an exploration as the default.
string Telegraf = 8; // Telegraf is the db telegraf is written to. By default it is "telegraf"
}
message Server {
int64 ID = 1; // ID is the unique ID of the server
string Name = 2; // Name is the user-defined name for the server
string Username = 3; // Username is the username to connect to the server
string Password = 4;
string URL = 5; // URL is the path to the server
int64 SrcID = 6; // SrcID is the ID of the data source
int64 ID = 1; // ID is the unique ID of the server
string Name = 2; // Name is the user-defined name for the server
string Username = 3; // Username is the username to connect to the server
string Password = 4;
string URL = 5; // URL is the path to the server
int64 SrcID = 6; // SrcID is the ID of the data source
}
message Layout {
@ -38,7 +39,7 @@ message Layout {
}
message Cell {
int32 x = 1; // X-coordinate of Cell in the Layout
int32 x = 1; // X-coordinate of Cell in the Layout
int32 y = 2; // Y-coordinate of Cell in the Layout
int32 w = 3; // Width of Cell in the Layout
int32 h = 4; // Height of Cell in the Layout
@ -61,3 +62,8 @@ message AlertRule {
int64 SrcID = 3; // SrcID is the id of the source this alert is associated with
int64 KapaID = 4; // KapaID is the id of the kapacitor this alert is associated with
}
message User {
	uint64 ID = 1;    // ID is the unique ID of this user
	string Email = 2; // Email is the email address identifying the user
}

View File

@ -39,6 +39,7 @@ func TestMarshalSource(t *testing.T) {
Password: "1 point twenty-one g1g@w@tts",
URL: "http://twin-pines.mall.io:8086",
Default: true,
Telegraf: "telegraf",
}
var vv chronograf.Source

129
bolt/users.go Normal file
View File

@ -0,0 +1,129 @@
package bolt
import (
"context"
"github.com/boltdb/bolt"
"github.com/influxdata/chronograf"
"github.com/influxdata/chronograf/bolt/internal"
)
// Ensure UsersStore implements chronograf.UsersStore.
var _ chronograf.UsersStore = &UsersStore{}

// UsersBucket is the bolt bucket name under which user records are stored.
var UsersBucket = []byte("Users")

// UsersStore is a bolt-backed implementation of chronograf.UsersStore.
type UsersStore struct {
	client *Client // shared bolt client that owns the underlying database handle
}
// FindByEmail searches the UsersStore for the user with the given email.
// It returns chronograf.ErrUserNotFound if no stored user has that email.
func (s *UsersStore) FindByEmail(ctx context.Context, email string) (*chronograf.User, error) {
	var user chronograf.User
	// Track the match explicitly rather than inferring "not found" from a
	// zero ID, which couples correctness to NextSequence never issuing 0.
	found := false
	err := s.client.db.View(func(tx *bolt.Tx) error {
		err := tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error {
			if found {
				// Already matched; skip decoding the remaining records.
				return nil
			}
			var u chronograf.User
			if err := internal.UnmarshalUser(v, &u); err != nil {
				return err
			}
			if u.Email != email {
				return nil
			}
			user = u
			found = true
			return nil
		})
		if err != nil {
			return err
		}
		if !found {
			return chronograf.ErrUserNotFound
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &user, nil
}
// Add persists a new user in the UsersStore, assigning it the bucket's
// next sequence number as its ID, and returns the stored user.
func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(UsersBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		u.ID = chronograf.UserID(seq)
		v, err := internal.MarshalUser(u)
		if err != nil {
			return err
		}
		return b.Put(itob(int(u.ID)), v)
	})
	if err != nil {
		return nil, err
	}
	return u, nil
}
// Delete removes the given user's record from the UsersStore.
func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(UsersBucket).Delete(itob(int(u.ID)))
	})
}
// Get retrieves the user stored under id, returning
// chronograf.ErrUserNotFound when no record exists for that id.
func (s *UsersStore) Get(ctx context.Context, id chronograf.UserID) (*chronograf.User, error) {
	var u chronograf.User
	err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(UsersBucket).Get(itob(int(id)))
		if v == nil {
			return chronograf.ErrUserNotFound
		}
		return internal.UnmarshalUser(v, &u)
	})
	if err != nil {
		return nil, err
	}
	return &u, nil
}
// Update replaces the stored email of an existing user, returning
// chronograf.ErrUserNotFound if no user exists with the given ID.
func (s *UsersStore) Update(ctx context.Context, usr *chronograf.User) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		// Load the existing record so only the email is rewritten.
		b := tx.Bucket(UsersBucket)
		v := b.Get(itob(int(usr.ID)))
		if v == nil {
			return chronograf.ErrUserNotFound
		}
		var u chronograf.User
		if err := internal.UnmarshalUser(v, &u); err != nil {
			return err
		}
		u.Email = usr.Email
		nv, err := internal.MarshalUser(&u)
		if err != nil {
			return err
		}
		return b.Put(itob(int(u.ID)), nv)
	})
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"BytesPerSec\")) AS \"bytes_per_sec\" FROM apache",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"ReqPerSec\")) AS \"req_per_sec\" FROM apache",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -51,8 +47,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"TotalAccesses\")) AS \"tot_access\" FROM apache",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT count(\"check_id\") as \"Number Critical\" FROM consul_health_checks",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"service_name\""
],
@ -34,8 +32,6 @@
"queries": [
{
"query": "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"service_name\""
],

View File

@ -12,9 +12,7 @@
"name": "CPU Usage",
"queries": [
{
"query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"telegraf\"..\"cpu\"",
"db": "telegraf",
"rp": "",
"query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM disk",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"path\""
],

View File

@ -1,36 +1,41 @@
{
"id": "0e980b97-c162-487b-a815-3f955df6243f",
"measurement": "docker",
"app": "docker",
"cells": [{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4c79cefb-5152-410c-9b88-74f9bff7ef22",
"name": "Docker - Container CPU",
"queries": [{
"query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \"docker_container_cpu\"",
"db": "telegraf",
"rp": "",
"groupbys": ["\"container_name\""],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4c79cefb-5152-410c-9b88-74f9bff7ef00",
"name": "Docker - Container Memory",
"queries": [{
"query": "SELECT mean(\"usage\") AS \"usage\" FROM \"docker_container_mem\"",
"db": "telegraf",
"rp": "",
"groupbys": ["\"container_name\""],
"wheres": []
}]
}]
}
{
"id": "0e980b97-c162-487b-a815-3f955df6243f",
"measurement": "docker",
"app": "docker",
"cells": [
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4c79cefb-5152-410c-9b88-74f9bff7ef22",
"name": "Docker - Container CPU",
"queries": [
{
"query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \"docker_container_cpu\"",
"groupbys": [
"\"container_name\""
],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4c79cefb-5152-410c-9b88-74f9bff7ef00",
"name": "Docker - Container Memory",
"queries": [
{
"query": "SELECT mean(\"usage\") AS \"usage\" FROM \"docker_container_mem\"",
"groupbys": [
"\"container_name\""
],
"wheres": []
}
]
}
]
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(search_query_total)) as searches_per_min, non_negative_derivative(mean(search_scroll_total)) as scrolls_per_min, non_negative_derivative(mean(search_fetch_total)) as fetches_per_min, non_negative_derivative(mean(search_suggest_total)) as suggests_per_min from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -30,8 +28,6 @@
"queries": [
{
"query": "select mean(current_open) from elasticsearch_http",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -47,8 +43,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(search_query_time_in_millis)) as mean, non_negative_derivative(median(search_query_time_in_millis)) as median, non_negative_derivative(percentile(search_query_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -64,8 +58,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(search_fetch_time_in_millis)) as mean, non_negative_derivative(median(search_fetch_time_in_millis)) as median, non_negative_derivative(percentile(search_fetch_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -81,8 +73,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(search_suggest_time_in_millis)) as mean, non_negative_derivative(median(search_suggest_time_in_millis)) as median, non_negative_derivative(percentile(search_suggest_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -98,8 +88,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(search_scroll_time_in_millis)) as mean, non_negative_derivative(median(search_scroll_time_in_millis)) as median, non_negative_derivative(percentile(search_scroll_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -115,8 +103,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(indexing_index_time_in_millis)) as mean from elasticsearch_indices",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -132,8 +118,6 @@
"queries": [
{
"query": "select mean(gc_collectors_old_collection_count) as old_count, mean(gc_collectors_young_collection_count) as young_count from elasticsearch_jvm",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -149,8 +133,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(gc_collectors_old_collection_time_in_millis)) as mean_old_time, non_negative_derivative(mean(gc_collectors_young_collection_time_in_millis)) as mean_young_time from elasticsearch_jvm",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -166,8 +148,6 @@
"queries": [
{
"query": "select mean(mem_heap_used_percent) from elasticsearch_jvm",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "select mean(\"active_servers\") AS active_servers, mean(\"backup_servers\") AS backup_servers FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -30,8 +28,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(last(\"http_response.2xx\"), 1s) AS \"2xx\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -47,8 +43,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(last(\"http_response.4xx\"), 1s) AS \"4xx\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -64,8 +58,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(last(\"http_response.5xx\"), 1s) AS \"5xx\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -81,8 +73,6 @@
"queries": [
{
"query": "SELECT mean(\"req_rate\") AS \"requests_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -98,8 +88,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"rate\")) AS \"sessions_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -115,8 +103,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"scur\")) / non_negative_derivative(max(\"slim\")) * 100 AS \"session_usage_percent\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -132,8 +118,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"dreq\")) AS \"denials_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -149,8 +133,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"ereq\")) AS \"errors_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -166,15 +148,11 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"bin\")) AS \"bytes_in_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
},
{
"query": "SELECT non_negative_derivative(max(\"bout\")) AS \"bytes_out_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -190,8 +168,6 @@
"queries": [
{
"query": "SELECT max(\"rtime\") AS \"response_time\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -207,8 +183,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"econ\")) AS \"errors_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -224,8 +198,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"qcur\")) AS \"queued_per_second\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -241,8 +213,6 @@
"queries": [
{
"query": "SELECT max(\"qtime\") AS \"queue_time\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -258,8 +228,6 @@
"queries": [
{
"query": "SELECT max(\"eresp\") AS \"error_response_rate\" FROM haproxy",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,18 +13,12 @@
"queries": [
{
"query": "SELECT max(\"numMeasurements\") AS \"measurements\" FROM \"influxdb_database\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
},
{
"query": "SELECT max(\"numSeries\") AS \"series\" FROM \"influxdb_database\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]

View File

@ -13,10 +13,7 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"writeReq\"), 1s) AS \"http_requests\" FROM \"influxdb_httpd\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]
@ -31,10 +28,7 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"queryReq\"), 1s) AS \"query_requests\" FROM \"influxdb_httpd\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]
@ -49,18 +43,12 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"clientError\"), 1s) AS \"client_errors\" FROM \"influxdb_httpd\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
},
{
"query": "SELECT non_negative_derivative(max(\"authFail\"), 1s) AS \"auth_fail\" FROM \"influxdb_httpd\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]

View File

@ -13,18 +13,12 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"queryDurationNs\"), 1s) AS \"duration\" FROM \"influxdb_queryExecutor\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
},
{
"query": "SELECT non_negative_derivative(max(\"queriesExecuted\"), 1s) AS \"queries_executed\" FROM \"influxdb_queryExecutor\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]

View File

@ -13,10 +13,7 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"pointReq\"), 1s) AS \"points_written\" FROM \"influxdb_write\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]
@ -31,18 +28,12 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"writeError\"), 1s) AS \"shard_write_error\" FROM \"influxdb_write\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
},
{
"query": "SELECT non_negative_derivative(max(\"serveError\"), 1s) AS \"http_error\" FROM \"influxdb_httpd\"",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_node",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"node_name\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_node",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"node_name\""
],

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_pod_container",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"pod_name\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_pod_container",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"pod_name\""
],

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"tx_bytes\")) AS \"tx_bytes_per_second\" FROM kubernetes_pod_network",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"pod_name\"",
"\"host\""
@ -33,8 +31,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"rx_bytes\")) AS \"rx_bytes_per_second\" FROM kubernetes_pod_network",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"pod_name\"",
"\"host\""

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_system_container",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": [
"\"container_name\" = 'kubelet'"
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_system_container",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": [
"\"container_name\" = 'kubelet'"

View File

@ -12,9 +12,7 @@
"name": "System Load",
"queries": [
{
"query": "SELECT mean(\"load1\") AS \"load\" FROM \"telegraf\"..\"system\"",
"db": "telegraf",
"rp": "",
"query": "SELECT mean(\"load1\") AS \"load\" FROM \"system\"",
"groupbys": [],
"wheres": []
}

View File

@ -12,9 +12,7 @@
"name": "System - Memory Bytes Used",
"queries": [
{
"query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"telegraf\"..\"mem\"",
"db": "telegraf",
"rp": "",
"query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"mem\"",
"groupbys": [],
"wheres": []
}

View File

@ -1,201 +1,202 @@
{
"id": "f280c8c7-0530-425c-b281-788d8ded7676",
"measurement": "memcached",
"app": "memcached",
"cells": [{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490",
"name": "Memcached - Current Connections",
"queries": [{
"query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400",
"name": "Memcached - Get Hits/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"get_hits\"), 1s) AS \"get_hits\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405",
"name": "Memcached - Get Misses/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"get_misses\"), 1s) AS \"get_misses\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413",
"name": "Memcached - Delete Hits/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"delete_hits\"), 1s) AS \"delete_hits\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412",
"name": "Memcached - Delete Misses/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"delete_misses\"), 1s) AS \"delete_misses\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411",
"name": "Memcached - Incr Hits/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"incr_hits\"), 1s) AS \"incr_hits\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510",
"name": "Memcached - Incr Misses/Second",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"incr_misses\"), 1s) AS \"incr_misses\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402",
"name": "Memcached - Current Items",
"queries": [{
"query": "SELECT max(\"curr_items\") AS \"current_items\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403",
"name": "Memcached - Total Items",
"queries": [{
"query": "SELECT max(\"total_items\") AS \"total_items\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404",
"name": "Memcached - Bytes Stored",
"queries": [{
"query": "SELECT max(\"bytes\") AS \"bytes\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406",
"name": "Memcached - Bytes Read/Sec",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"bytes_read\"), 1s) AS \"bytes_read\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407",
"name": "Memcached - Bytes Written/Sec",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"bytes_written\"), 1s) AS \"bytes_written\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401",
"name": "Memcached - Evictions/10 Seconds",
"queries": [{
"query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM memcached",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
}
]
}
{
"id": "f280c8c7-0530-425c-b281-788d8ded7676",
"measurement": "memcached",
"app": "memcached",
"cells": [
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490",
"name": "Memcached - Current Connections",
"queries": [
{
"query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400",
"name": "Memcached - Get Hits/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"get_hits\"), 1s) AS \"get_hits\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405",
"name": "Memcached - Get Misses/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"get_misses\"), 1s) AS \"get_misses\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413",
"name": "Memcached - Delete Hits/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"delete_hits\"), 1s) AS \"delete_hits\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412",
"name": "Memcached - Delete Misses/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"delete_misses\"), 1s) AS \"delete_misses\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411",
"name": "Memcached - Incr Hits/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"incr_hits\"), 1s) AS \"incr_hits\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510",
"name": "Memcached - Incr Misses/Second",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"incr_misses\"), 1s) AS \"incr_misses\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402",
"name": "Memcached - Current Items",
"queries": [
{
"query": "SELECT max(\"curr_items\") AS \"current_items\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403",
"name": "Memcached - Total Items",
"queries": [
{
"query": "SELECT max(\"total_items\") AS \"total_items\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404",
"name": "Memcached - Bytes Stored",
"queries": [
{
"query": "SELECT max(\"bytes\") AS \"bytes\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406",
"name": "Memcached - Bytes Read/Sec",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"bytes_read\"), 1s) AS \"bytes_read\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407",
"name": "Memcached - Bytes Written/Sec",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"bytes_written\"), 1s) AS \"bytes_written\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401",
"name": "Memcached - Evictions/10 Seconds",
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM memcached",
"groupbys": [],
"wheres": []
}
]
}
]
}

View File

@ -1,117 +1,112 @@
{
"id": "921298ad-0cdd-44f4-839b-10c319e7fcc7",
"measurement": "mongodb",
"app": "mongodb",
"cells": [{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "b2631fd5-7d32-4a31-9edf-98362fd3626e",
"name": "MongoDB Read/Second",
"queries": [{
"query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"id": "921298ad-0cdd-44f4-839b-10c319e7fcc7",
"measurement": "mongodb",
"app": "mongodb",
"cells": [
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "9362e390-951b-4dba-adec-40c261e37604",
"name": "MongoDB Writes/Second",
"queries": [{
"query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "b2631fd5-7d32-4a31-9edf-98362fd3626e",
"name": "MongoDB Read/Second",
"queries": [
{
"query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4",
"name": "MongoDB Active Connections",
"queries": [{
"query": "SELECT mean(open_connections) AS open_connections FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "ea5ae388-9ca3-42f9-835f-cc9b265705be",
"name": "MongoDB Reads/Writes Waiting in Queue",
"queries": [{
"query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "631dcbba-c997-4fd7-b640-754a1b36026c",
"name": "MongoDB Network Bytes/Second",
"queries": [{
"query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2",
"name": "MongoDB Page Faults",
"queries": [{
"query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8",
"name": "MongoDB Memory Usage (MB)",
"queries": [{
"query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM mongodb",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
]
}
]
}
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "9362e390-951b-4dba-adec-40c261e37604",
"name": "MongoDB Writes/Second",
"queries": [
{
"query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4",
"name": "MongoDB Active Connections",
"queries": [
{
"query": "SELECT mean(open_connections) AS open_connections FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "ea5ae388-9ca3-42f9-835f-cc9b265705be",
"name": "MongoDB Reads/Writes Waiting in Queue",
"queries": [
{
"query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "631dcbba-c997-4fd7-b640-754a1b36026c",
"name": "MongoDB Network Bytes/Second",
"queries": [
{
"query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2",
"name": "MongoDB Page Faults",
"queries": [
{
"query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM mongodb",
"groupbys": [],
"wheres": []
}
]
},
{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8",
"name": "MongoDB Memory Usage (MB)",
"queries": [
{
"query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM mongodb",
"groupbys": [],
"wheres": []
}
]
}
]
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"commands_select\")) AS selects_per_second FROM mysql",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"commands_insert\")) AS inserts_per_second, non_negative_derivative(max(\"commands_update\")) AS updates_per_second, non_negative_derivative(max(\"commands_delete\")) AS deletes_per_second FROM mysql",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -51,8 +47,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"threads_connected\")) AS cxn_per_second, non_negative_derivative(max(\"threads_running\")) AS threads_running_per_second FROM mysql",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -70,8 +64,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"connection_errors_max_connections\")) AS cxn_errors_per_second, non_negative_derivative(max(\"connection_errors_internal\")) AS internal_cxn_errors_per_second, non_negative_derivative(max(\"aborted_connects\")) AS cxn_aborted_per_second FROM mysql",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],

View File

@ -1,7 +1,7 @@
{
"id": "ff41d044-f61a-4522-8de7-9e39e3a1b5de",
"measurement": "netstat",
"app": "network",
"app": "system",
"cells": [
{
"x": 0,
@ -13,15 +13,11 @@
"queries": [
{
"query": "SELECT mean(\"tcp_established\") AS \"tcp_established\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
},
{
"query": "SELECT mean(\"udp_socket\") AS \"udp_socket\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -37,15 +33,11 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"tcp_established\")) AS \"tcp_established\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
},
{
"query": "SELECT non_negative_derivative(max(\"udp_socket\")) AS \"udp_socket\" FROM netstat",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -37,24 +37,22 @@ UUID=$(uuidgen | tr A-Z a-z)
APP_FILE="$measurement".json
echo Creating measurement file $APP_FILE
cat > $APP_FILE << EOF
{
"id": "$UUID",
"measurement": "$measurement",
"app": "$measurement",
"cells": [{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "$CELLID",
"name": "User facing cell Name",
"queries": [{
"query": "select mean(\"used_percent from\") from disk",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}]
}]
}
{
"id": "$UUID",
"measurement": "$measurement",
"app": "$measurement",
"cells": [{
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"i": "$CELLID",
"name": "User facing cell Name",
"queries": [{
"query": "select mean(\"used_percent from\") from disk",
"groupbys": [],
"wheres": []
}]
}]
}
EOF

View File

@ -13,10 +13,8 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"accepts\"), 1s) AS \"accepts\", non_negative_derivative(max(\"handled\"), 1s) AS \"handled\", non_negative_derivative(max(\"active\"), 1s) AS \"active\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
"\"server\""
],
"wheres": []
}
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"accepts\")) - non_negative_derivative(max(\"handled\")) FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -51,8 +47,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"requests\"), 1s) AS \"requests\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -70,8 +64,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"waiting\"), 1s) AS \"waiting\", non_negative_derivative(max(\"reading\"), 1s) AS \"reading\", non_negative_derivative(max(\"writing\"), 1s) AS \"writing\" FROM nginx",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"client_count\") AS \"client_count\" FROM nsq_channel",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\"",
"\"channel\""
@ -33,8 +31,6 @@
"queries": [
{
"query": "SELECT mean(\"message_count\") AS \"message_count\" FROM nsq_channel",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\"",
"\"channel\""

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"topic_count\") AS \"topic_count\" FROM nsq_server",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -30,8 +28,6 @@
"queries": [
{
"query": "SELECT mean(\"server_count\") AS \"server_count\" FROM nsq_server",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"depth\") AS \"depth\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT mean(\"backend_depth\") AS \"backend_depth\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\""
],
@ -51,8 +47,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"message_count\")) AS \"messages_per_second\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\"",
"\"host\""
@ -71,8 +65,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"message_count\")) - non_negative_derivative(max(\"depth\")) AS \"messages_per_second\" FROM nsq_topic",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"topic\"",
"\"host\""

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "select max(\"percent_packet_loss\") as \"packet_loss\" from ping",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "select mean(\"average_response_ms\") as \"average\", mean(\"minimum_response_ms\") as \"min\", mean(\"maximum_response_ms\") as \"max\" from ping",
"db": "telegraf",
"rp": "",
"groupbys": [
"\"server\""
],

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(mean(\"tup_fetched\")) AS \"fetched\", non_negative_derivative(mean(\"tup_returned\")) AS \"returned\", non_negative_derivative(mean(\"tup_inserted\")) AS \"inserted\", non_negative_derivative(mean(\"tup_updated\")) AS \"updated\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [
"db"
],
@ -32,8 +30,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(mean(\"xact_commit\")) AS \"xact_commit\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [
"db"
],
@ -51,10 +47,7 @@
"queries": [
{
"query": "SELECT mean(\"buffers_alloc\") AS \"buffers_allocated\", mean(\"buffers_backend\") AS \"buffers_backend\", mean(\"buffers_backend_fsync\") AS \"buffers_backend_fsync\", mean(\"buffers_checkpoint\") AS \"buffers_checkpoint\", mean(\"buffers_clean\") AS \"buffers_clean\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]
@ -69,10 +62,7 @@
"queries": [
{
"query": "SELECT mean(\"conflicts\") AS \"conflicts\", mean(\"deadlocks\") AS \"deadlocks\" FROM postgresql",
"db": "telegraf",
"rp": "",
"groupbys": [
],
"groupbys": [],
"wheres": []
}
]

View File

@ -1,7 +1,7 @@
{
"id": "ffad2dff-d263-412e-806a-1e836af87942",
"measurement": "processes",
"app": "processes",
"app": "system",
"cells": [
{
"x": 0,
@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"total\") AS \"total\" FROM processes",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"clients\") AS \"clients\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": []
}
]
@ -29,8 +27,6 @@
"queries": [
{
"query": "SELECT mean(\"blocked_clients\") AS \"blocked_clients\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": []
}
]
@ -45,8 +41,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"used_cpu_user\")) AS \"used_cpu_per_second\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": []
}
]
@ -61,8 +55,6 @@
"queries": [
{
"query": "SELECT non_negative_derivative(max(\"used_memory\")) AS \"used_memory_per_second\" FROM redis",
"db": "telegraf",
"rp": "",
"groupbys": []
}
]

View File

@ -13,9 +13,9 @@
"queries": [
{
"query": "SELECT max(\"memory_total\") as memory_total_bytes FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -30,9 +30,9 @@
"queries": [
{
"query": "SELECT max(\"node_get_fsm_objsize_median\") AS \"median\", max(\"node_get_fsm_objsize_100\") AS \"100th-percentile\", max(\"node_get_fsm_objsize_99\") AS \"99th-percentile\", max(\"node_get_fsm_objsize_mean\") AS \"mean\", max(\"node_get_fsm_objsize_95\") AS \"95th-percentile\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -47,9 +47,9 @@
"queries": [
{
"query": "SELECT max(\"node_get_fsm_siblings_median\") AS \"median\", max(\"node_get_fsm_siblings_mean\") AS \"mean\", max(\"node_get_fsm_siblings_99\") AS \"99th-percentile\", max(\"node_get_fsm_siblings_95\") AS \"95h-percentile\", max(\"node_get_fsm_siblings_100\") AS \"100th-percentile\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -64,9 +64,9 @@
"queries": [
{
"query": "SELECT max(\"node_put_fsm_time_median\") / 1000 AS \"median_put_milliseconds\", max(\"node_get_fsm_time_median\") / 1000 AS \"median_get_milliseconds\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -81,9 +81,9 @@
"queries": [
{
"query": "SELECT max(\"node_puts\") AS \"puts_per_minute\", max(\"node_gets\") AS \"gets_per_minute\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -98,9 +98,9 @@
"queries": [
{
"query": "SELECT max(\"pbc_active\") AS \"active_protobuf_connections\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]
@ -115,9 +115,9 @@
"queries": [
{
"query": "SELECT max(\"read_repairs\") AS \"read_repairs_per_minute\" FROM riak",
"db": "telegraf",
"rp": "",
"groupbys": ["\"nodename\""],
"groupbys": [
"\"nodename\""
],
"wheres": []
}
]

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "select non_negative_derivative(mean(cache_hit)) as hits, non_negative_derivative(mean(cache_miss)) as misses from varnish",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"Percent_Processor_Time\") AS \"percent_processor_time\" FROM win_cpu",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"Available_Bytes\") AS \"available_bytes\" FROM win_mem",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"Bytes_Sent_persec\") AS \"bytes_sent\" FROM win_net",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}
@ -30,8 +28,6 @@
"queries": [
{
"query": "SELECT mean(\"Bytes_Received_persec\") AS \"bytes_received\" FROM win_net",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,8 +13,6 @@
"queries": [
{
"query": "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM win_system",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,22 +13,16 @@
"queries": [
{
"query": "SELECT mean(\"Get_Requests_persec\") AS \"gets\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
},
{
"query": "SELECT mean(\"Post_Requests_persec\") AS \"posts\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
},
{
"query": "SELECT mean(\"Current_Connections\") AS \"connections\" FROM win_websvc",
"db": "telegraf",
"rp": "",
"groupbys": [],
"wheres": []
}

View File

@ -13,6 +13,7 @@ const (
ErrSourceNotFound = Error("source not found")
ErrServerNotFound = Error("server not found")
ErrLayoutNotFound = Error("layout not found")
ErrUserNotFound = Error("user not found")
ErrLayoutInvalid = Error("layout is invalid")
ErrAlertNotFound = Error("alert not found")
ErrAuthentication = Error("user not authenticated")
@ -54,11 +55,11 @@ type TimeSeries interface {
// Query retrieves a Response from a TimeSeries.
type Query struct {
Command string `json:"query"` // Command is the query itself
DB string `json:"db"` // DB is optional and if empty will not be used.
RP string `json:"rp"` // RP is a retention policy and optional; if empty will not be used.
Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes
GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags
Command string `json:"query"` // Command is the query itself
DB string `json:"db,omitempty"` // DB is optional and if empty will not be used.
RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used.
Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes
GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags
}
// Response is the result of a query against a TimeSeries
@ -75,6 +76,7 @@ type Source struct {
Password string `json:"password,omitempty"` // Password is in CLEARTEXT
URL string `json:"url"` // URL are the connections to the source
Default bool `json:"default"` // Default specifies the default source for the application
Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf"
}
// SourcesStore stores connection information for a `TimeSeries`
@ -195,23 +197,22 @@ type UserID int
// User represents an authenticated user.
type User struct {
ID UserID
Name string
ID UserID `json:"id"`
Email string `json:"email"`
}
// AuthStore is the Storage and retrieval of authentication information
type AuthStore struct {
// User management for the AuthStore
Users interface {
// Create a new User in the AuthStore
Add(context.Context, User) error
// Delete the User from the AuthStore
Delete(context.Context, User) error
// Retrieve a user if `ID` exists.
Get(ctx context.Context, ID int) error
// Update the user's permissions or roles
Update(context.Context, User) error
}
// UsersStore is the Storage and retrieval of authentication information
type UsersStore interface {
// Create a new User in the UsersStore
Add(context.Context, *User) (*User, error)
// Delete the User from the UsersStore
Delete(context.Context, *User) error
// Get retrieves a user if `ID` exists.
Get(ctx context.Context, ID UserID) (*User, error)
// Update the user's permissions or roles
Update(context.Context, *User) error
// FindByEmail will retrieve a user by email address.
FindByEmail(ctx context.Context, Email string) (*User, error)
}
// ExplorationID is a unique ID for an Exploration.

View File

@ -1,32 +1,64 @@
---
machine:
services:
- docker
post:
- go version
- go version | grep 1.7.3 || (sudo rm -rf /usr/local/go && wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz && sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz)
- go version
services:
- docker
environment:
DOCKER_TAG: chronograf-20161121
dependencies:
pre:
- npm install -g node-sass
- git config --global url."git@github.com:".insteadOf "https://github.com/"
- mkdir -p ${HOME}/.go_workspace/src/github.com/influxdata
- ln -sf ${HOME}/chronograf ${HOME}/.go_workspace/src/github.com/influxdata
- "make clean":
pwd: ../.go_workspace/src/github.com/influxdata/chronograf
- "make":
pwd: ../.go_workspace/src/github.com/influxdata/chronograf
override:
- ./etc/scripts/docker/pull.sh
test:
override:
- make test
override:
- >
./etc/scripts/docker/run.sh
--test
--no-build
deployment:
quayio:
branch: master
commands:
- make docker
- docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io
- docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
- docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
master:
branch: master
commands:
- >
./etc/scripts/docker/run.sh
--clean
--package
--platform all
--arch all
--upload
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- cp build/linux/static_amd64/chronograf .
- docker build -t chronograf .
- docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io
- docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
- docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7}
- mv ./build/* $CIRCLE_ARTIFACTS
pre-release:
tag: /^[0-9]+(\.[0-9]+)*(\S*)([a|rc|beta]([0-9]+))+$/
commands:
- >
./etc/scripts/docker/run.sh
--clean
--release
--package
--platform all
--arch all
--upload
--bucket dl.influxdata.com/chronograf/releases
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- mv ./build/* $CIRCLE_ARTIFACTS
release:
tag: /^[0-9]+(\.[0-9]+)*$/
commands:
- >
./etc/scripts/docker/run.sh
--clean
--release
--package
--platform all
--arch all
--upload
--bucket dl.influxdata.com/chronograf/releases
- sudo chown -R ubuntu:ubuntu /home/ubuntu
- mv ./build/* $CIRCLE_ARTIFACTS

View File

@ -6,7 +6,7 @@ It makes owning the monitoring and alerting for your infrastructure easy to setu
The next sections will get you up and running with Chronograf with as little configuration and
code as possible.
By the end of this document you will have downloaded, installed, and configured all four packages of the
TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor you infrastructure.
TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor your infrastructure.
## Operating System Support
Chronograf and the other components of the TICK stack are supported on a large number of operating systems and hardware architectures.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 67 KiB

After

Width:  |  Height:  |  Size: 93 KiB

9
docs/proto.md Normal file
View File

@ -0,0 +1,9 @@
Download a `protoc` binary from https://github.com/google/protobuf/releases/tag/v3.1.0.
Then run the following four commands, as listed at https://github.com/gogo/protobuf:
```sh
go get github.com/gogo/protobuf/proto
go get github.com/gogo/protobuf/jsonpb
go get github.com/gogo/protobuf/protoc-gen-gogo
go get github.com/gogo/protobuf/gogoproto
```

View File

@ -11,6 +11,7 @@ import hashlib
import re
import logging
import argparse
import json
################
#### Chronograf Variables
@ -147,7 +148,6 @@ def run_generate():
"""Generate static assets.
"""
logging.info("Generating static assets...")
run("make dep", shell=True)
run("make assets", shell=True)
return True
@ -157,75 +157,46 @@ def go_get(branch, update=False, no_uncommitted=False):
if local_changes() and no_uncommitted:
logging.error("There are uncommitted changes in the current directory.")
return False
if not check_path_for("gdm"):
logging.info("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
logging.info("Retrieving dependencies with `gdm`...")
sys.stdout.flush()
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH")))
run("make dep", shell=True)
return True
def run_tests(race, parallel, timeout, no_vet):
"""Run the Go test suite on binary output.
"""
logging.info("Starting tests...")
if race:
logging.info("Race is enabled.")
if parallel is not None:
logging.info("Using parallel: {}".format(parallel))
if timeout is not None:
logging.info("Using timeout: {}".format(timeout))
out = run("go fmt ./...")
if len(out) > 0:
logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
logging.error("{}".format(out))
return False
if not no_vet:
logging.info("Running 'go vet'...")
out = run(go_vet_command)
if len(out) > 0:
logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
logging.error("{}".format(out))
return False
else:
logging.info("Skipping 'go vet' call...")
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
logging.info("Running tests...")
output = run(test_command)
logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore')))
run("make test", shell=True, print_output=True)
return True
################
#### All Chronograf-specific content above this line
################
def run(command, allow_failure=False, shell=False):
def run(command, allow_failure=False, shell=False, print_output=False):
"""Run shell command (convenience wrapper around subprocess).
"""
out = None
logging.debug("{}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode('utf-8').strip()
# logging.debug("Command output: {}".format(out))
except subprocess.CalledProcessError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e.output))
return None
else:
logging.error("Command '{}' failed with error: {}".format(command, e.output))
sys.exit(1)
cmd = command
if not shell:
cmd = command.split()
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
if print_output:
stdout = None
p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
out, _ = p.communicate()
if out is not None:
out = out.decode('utf-8').strip()
if p.returncode != 0:
if allow_failure:
logging.warn(u"Command '{}' failed with error: {}".format(command, out))
return None
else:
logging.error(u"Command '{}' failed with error: {}".format(command, out))
sys.exit(1)
except OSError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e))
@ -767,6 +738,9 @@ def main(args):
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
return 1
if args.no_build:
return 0
platforms = []
single_build = True
if args.platform == 'all':
@ -828,10 +802,54 @@ def main(args):
args.upload_overwrite = True
if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
return 1
logging.info("Packages created:")
package_output = {}
package_output["version"] = args.version
for p in packages:
logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
generate_md5_from_file(p)))
p_name = p.split('/')[-1:][0]
if ".asc" in p_name:
# Skip public keys
continue
arch = None
type = None
regex = None
if ".deb" in p_name:
type = "ubuntu"
regex = r"^.+_(.+)\.deb$"
elif ".rpm" in p_name:
type = "centos"
regex = r"^.+\.(.+)\.rpm$"
elif ".tar.gz" in p_name:
if "linux" in p_name:
if "static" in p_name:
type = "linux_static"
else:
type = "linux"
elif "darwin" in p_name:
type = "darwin"
regex = r"^.+_(.+)\.tar.gz$"
elif ".zip" in p_name:
if "windows" in p_name:
type = "windows"
regex = r"^.+_(.+)\.zip$"
if regex is None or type is None:
logging.error("Could not determine package type for: {}".format(p))
return 1
match = re.search(regex, p_name)
arch = match.groups()[0]
if arch is None:
logging.error("Could not determine arch for: {}".format(p))
return 1
if arch == "x86_64":
arch = "amd64"
elif arch == "x86_32":
arch = "i386"
package_output[str(arch) + "_" + str(type)] = {
"md5": generate_md5_from_file(p),
"filename": p_name,
}
logging.info(json.dumps(package_output, sort_keys=True, indent=4))
if orig_branch != get_current_branch():
logging.info("Moving back to original git branch: {}".format(orig_branch))
run("git checkout {}".format(orig_branch))
@ -964,6 +982,9 @@ if __name__ == '__main__':
metavar='<timeout>',
type=str,
help='Timeout for tests before failing')
parser.add_argument('--no-build',
action='store_true',
help='Dont build anything.')
args = parser.parse_args()
print_banner()
sys.exit(main(args))

8
etc/scripts/docker/build.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
#
# Build the Chronograf builder Docker image and push it to quay.io.
# The image tag is derived from today's date (chronograf-YYYYMMDD).
#
# Fail fast if any step errors (don't tag/push a failed build),
# and trace commands for CI logs.
set -e
set -x

docker_tag="chronograf-$(date +%Y%m%d)"

# Quote expansions so an unexpected value can't word-split.
docker build --rm=false -f etc/Dockerfile_build -t "builder:$docker_tag" .
docker tag "builder:$docker_tag" "quay.io/influxdb/builder:$docker_tag"
docker push "quay.io/influxdb/builder:$docker_tag"

11
etc/scripts/docker/pull.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash
#
# Pull the required build image from quay.io.
#
# DOCKER_TAG selects which builder image to fetch; it must be set by
# the caller (CI exports it in the environment section).
if [[ -z "$DOCKER_TAG" ]]; then
    echo "Please specify a tag to pull from with the DOCKER_TAG env variable."
    exit 1
fi

# Quote the expansion so the tag can't word-split or glob.
docker pull "quay.io/influxdb/builder:$DOCKER_TAG"

26
etc/scripts/docker/run.sh Executable file
View File

@ -0,0 +1,26 @@
#!/bin/bash
#
# Run the Chronograf builder Docker image, forwarding all CLI
# arguments to the build scripts inside the container.
#
# WARNING: This script passes your SSH and AWS credentials within the
# Docker image, so use with caution.
#
set -e

# Default SSH key to $HOME/.ssh/id_rsa if not set.
# Quote the test operand: unquoted, a path containing spaces would
# break `test`, and an unset variable only works by accident.
test -z "$SSH_KEY_PATH" && SSH_KEY_PATH="$HOME/.ssh/id_rsa"
echo "Using SSH key located at: $SSH_KEY_PATH"

# Default docker tag if not specified.
test -z "$DOCKER_TAG" && DOCKER_TAG="chronograf-20161121"

# All expansions quoted so paths with spaces survive word splitting.
docker run \
    -e AWS_ACCESS_KEY_ID \
    -e AWS_SECRET_ACCESS_KEY \
    -v "$SSH_KEY_PATH":/root/.ssh/id_rsa \
    -v "$HOME/.ssh/known_hosts":/root/.ssh/known_hosts \
    -v "$(pwd)":/root/go/src/github.com/influxdata/chronograf \
    "quay.io/influxdb/builder:$DOCKER_TAG" \
    "$@"

View File

@ -45,14 +45,14 @@ type explorations struct {
func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
mrExs, err := h.ExplorationStore.Query(ctx, chronograf.UserID(id))
if err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, h.Logger)
return
}
@ -71,20 +71,20 @@ func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) {
func (h *Service) ExplorationsID(w http.ResponseWriter, r *http.Request) {
eID, err := paramID("eid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
uID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID))
if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, eID)
notFound(w, eID, h.Logger)
return
}
@ -101,26 +101,26 @@ type patchExplorationRequest struct {
func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
id, err := paramID("eid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
uID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(id))
if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
var req patchExplorationRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
@ -128,7 +128,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
var ok bool
if e.Data, ok = req.Data.(string); !ok {
err := fmt.Errorf("Error: Exploration data is not a string")
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
}
@ -139,7 +139,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) {
if err := h.ExplorationStore.Update(ctx, e); err != nil {
msg := "Error: Failed to update Exploration"
Error(w, http.StatusInternalServerError, msg)
Error(w, http.StatusInternalServerError, msg, h.Logger)
return
}
@ -156,14 +156,14 @@ type postExplorationRequest struct {
func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
uID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
// TODO: Check user if user exists.
var req postExplorationRequest
if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
@ -182,7 +182,7 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
e, err = h.ExplorationStore.Add(ctx, e)
if err != nil {
msg := fmt.Errorf("Error: Failed to save Exploration")
unknownErrorWithMessage(w, msg)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
@ -195,25 +195,25 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveExploration(w http.ResponseWriter, r *http.Request) {
eID, err := paramID("eid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
uID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID))
if err != nil || e.UserID != chronograf.UserID(uID) {
notFound(w, eID)
notFound(w, eID, h.Logger)
return
}
if err := h.ExplorationStore.Delete(ctx, &chronograf.Exploration{ID: chronograf.ExplorationID(eID)}); err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, h.Logger)
return
}
w.WriteHeader(http.StatusNoContent)

View File

@ -55,24 +55,24 @@ type kapacitor struct {
func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) {
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
_, err = h.SourcesStore.Get(ctx, srcID)
if err != nil {
notFound(w, srcID)
notFound(w, srcID, h.Logger)
return
}
var req postKapacitorRequest
if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
if err := req.Valid(); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
@ -86,7 +86,7 @@ func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) {
if srv, err = h.ServersStore.Add(ctx, srv); err != nil {
msg := fmt.Errorf("Error storing kapacitor %v: %v", req, err)
unknownErrorWithMessage(w, msg)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
@ -120,7 +120,7 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
mrSrvs, err := h.ServersStore.All(ctx)
if err != nil {
Error(w, http.StatusInternalServerError, "Error loading kapacitors")
Error(w, http.StatusInternalServerError, "Error loading kapacitors", h.Logger)
return
}
@ -140,20 +140,20 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
@ -165,25 +165,25 @@ func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
if err = h.ServersStore.Delete(ctx, srv); err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, h.Logger)
return
}
w.WriteHeader(http.StatusNoContent)
@ -213,31 +213,31 @@ func (p *patchKapacitorRequest) Valid() error {
func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
var req patchKapacitorRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
if err := req.Valid(); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
@ -256,7 +256,7 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
if err := h.ServersStore.Update(ctx, srv); err != nil {
msg := fmt.Sprintf("Error updating kapacitor ID %d", id)
Error(w, http.StatusInternalServerError, msg)
Error(w, http.StatusInternalServerError, msg, h.Logger)
return
}
@ -268,20 +268,20 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
@ -295,7 +295,7 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
var req chronograf.AlertRule
if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
// TODO: validate this data
@ -308,13 +308,13 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
task, err := c.Create(ctx, req)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
req.ID = task.ID
rule, err := h.AlertRulesStore.Add(ctx, srcID, id, req)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -348,20 +348,20 @@ type alertResponse struct {
func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
@ -374,7 +374,7 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
}
var req chronograf.AlertRule
if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
// TODO: validate this data
@ -388,22 +388,22 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
// Check if the rule exists and is scoped correctly
if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil {
if err == chronograf.ErrAlertNotFound {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
req.ID = tid
task, err := c.Update(ctx, c.Href(tid), req)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
if err := h.AlertRulesStore.Update(ctx, srcID, id, req); err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -423,26 +423,26 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
rules, err := h.AlertRulesStore.All(ctx, srcID, id)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -454,7 +454,7 @@ func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) {
for _, rule := range rules {
tickscript, err := ticker.Generate(rule)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -476,24 +476,24 @@ type allAlertsResponse struct {
Rules []alertResponse `json:"rules"`
}
// KapacitorRulesGet retrieves specific task
// KapacitorRulesID retrieves specific task
func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
tid := httprouter.GetParamFromContext(ctx, "tid")
@ -501,10 +501,10 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
rule, err := h.AlertRulesStore.Get(ctx, srcID, id, tid)
if err != nil {
if err == chronograf.ErrAlertNotFound {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -512,7 +512,7 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
c := kapa.Client{}
tickscript, err := ticker.Generate(rule)
if err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -528,24 +528,24 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
encodeJSON(w, http.StatusOK, res, h.Logger)
}
// KapacitosRulesDelete proxies DELETE to kapacitor
// KapacitorRulesDelete proxies DELETE to kapacitor
func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
@ -554,10 +554,10 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
// Check if the rule is linked to this server and kapacitor
if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil {
if err == chronograf.ErrAlertNotFound {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
@ -567,12 +567,12 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
Password: srv.Password,
}
if err := c.Delete(ctx, c.Href(tid)); err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}
if err := h.AlertRulesStore.Delete(ctx, srcID, id, chronograf.AlertRule{ID: tid}); err != nil {
Error(w, http.StatusInternalServerError, err.Error())
Error(w, http.StatusInternalServerError, err.Error(), h.Logger)
return
}

View File

@ -32,19 +32,19 @@ func newLayoutResponse(layout chronograf.Layout) layoutResponse {
func (h *Service) NewLayout(w http.ResponseWriter, r *http.Request) {
var layout chronograf.Layout
if err := json.NewDecoder(r.Body).Decode(&layout); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
if err := ValidLayoutRequest(layout); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
var err error
if layout, err = h.LayoutStore.Add(r.Context(), layout); err != nil {
msg := fmt.Errorf("Error storing layout %v: %v", layout, err)
unknownErrorWithMessage(w, msg)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
@ -72,7 +72,7 @@ func (h *Service) Layouts(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
layouts, err := h.LayoutStore.All(ctx)
if err != nil {
Error(w, http.StatusInternalServerError, "Error loading layouts")
Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger)
return
}
@ -104,7 +104,7 @@ func (h *Service) LayoutsID(w http.ResponseWriter, r *http.Request) {
layout, err := h.LayoutStore.Get(ctx, id)
if err != nil {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id))
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger)
return
}
@ -122,7 +122,7 @@ func (h *Service) RemoveLayout(w http.ResponseWriter, r *http.Request) {
}
if err := h.LayoutStore.Delete(ctx, layout); err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, h.Logger)
return
}
@ -136,25 +136,25 @@ func (h *Service) UpdateLayout(w http.ResponseWriter, r *http.Request) {
_, err := h.LayoutStore.Get(ctx, id)
if err != nil {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id))
Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger)
return
}
var req chronograf.Layout
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
req.ID = id
if err := ValidLayoutRequest(req); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
if err := h.LayoutStore.Update(ctx, req); err != nil {
msg := fmt.Sprintf("Error updating layout ID %s: %v", id, err)
Error(w, http.StatusInternalServerError, msg)
Error(w, http.StatusInternalServerError, msg, h.Logger)
return
}

View File

@ -16,7 +16,7 @@ func (h *Service) GetMappings(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
layouts, err := h.LayoutStore.All(ctx)
if err != nil {
Error(w, http.StatusInternalServerError, "Error loading layouts")
Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger)
return
}

View File

@ -8,7 +8,7 @@ import (
"strings"
"github.com/bouk/httprouter"
"github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch "github.com/influxdata/chronograf
"github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch
"github.com/influxdata/chronograf/jwt"
)
@ -94,14 +94,13 @@ func NewMux(opts MuxOpts, service Service) http.Handler {
router.DELETE("/chronograf/v1/layouts/:id", service.RemoveLayout)
// Users
/*
router.GET("/chronograf/v1/users", Users)
router.POST("/chronograf/v1/users", NewUser)
router.GET("/chronograf/v1/me", service.Me)
router.POST("/chronograf/v1/users", service.NewUser)
router.GET("/chronograf/v1/users/:id", service.UserID)
router.PATCH("/chronograf/v1/users/:id", service.UpdateUser)
router.DELETE("/chronograf/v1/users/:id", service.RemoveUser)
router.GET("/chronograf/v1/users/:id", UsersID)
router.PATCH("/chronograf/v1/users/:id", UpdateUser)
router.DELETE("/chronograf/v1/users/:id", RemoveUser)
*/
// Explorations
router.GET("/chronograf/v1/users/:id/explorations", service.Explorations)
router.POST("/chronograf/v1/users/:id/explorations", service.NewExploration)
@ -133,7 +132,7 @@ func AuthAPI(opts MuxOpts, router *httprouter.Router) http.Handler {
opts.Logger,
)
router.GET("/oauth", gh.Login())
router.GET("/oauth/github", gh.Login())
router.GET("/oauth/logout", gh.Logout())
router.GET("/oauth/github/callback", gh.Callback())
@ -152,44 +151,45 @@ func encodeJSON(w http.ResponseWriter, status int, v interface{}, logger chronog
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
if err := json.NewEncoder(w).Encode(v); err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, logger)
}
}
// Error writes an JSON message
func Error(w http.ResponseWriter, code int, msg string) {
e := struct {
Code int `json:"code"`
Message string `json:"message"`
}{
func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) {
e := ErrorMessage{
Code: code,
Message: msg,
}
b, err := json.Marshal(e)
if err != nil {
//log.Print("go-oidc: failed to marshal %#v: %v", e, err)
code = http.StatusInternalServerError
b = []byte(`{"code": 500, "message":"server_error"}`)
}
logger.
WithField("component", "server").
WithField("http_status ", code).
Error("Error message ", msg)
w.Header().Set("Content-Type", JSONType)
w.WriteHeader(code)
w.Write(b)
}
func invalidData(w http.ResponseWriter, err error) {
Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err))
func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) {
Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err), logger)
}
func invalidJSON(w http.ResponseWriter) {
Error(w, http.StatusBadRequest, "Unparsable JSON")
func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) {
Error(w, http.StatusBadRequest, "Unparsable JSON", logger)
}
func unknownErrorWithMessage(w http.ResponseWriter, err error) {
Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err))
func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) {
Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err), logger)
}
func notFound(w http.ResponseWriter, id int) {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id))
func notFound(w http.ResponseWriter, id int, logger chronograf.Logger) {
Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), logger)
}
func paramID(key string, r *http.Request) (int, error) {

View File

@ -26,30 +26,30 @@ type postProxyResponse struct {
func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
var req chronograf.Query
if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
if err = ValidProxyRequest(req); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id)
if err != nil {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
if err = h.TimeSeries.Connect(ctx, &src); err != nil {
msg := fmt.Sprintf("Unable to connect to source %d", id)
Error(w, http.StatusBadRequest, msg)
Error(w, http.StatusBadRequest, msg, h.Logger)
return
}
@ -57,11 +57,11 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
if err != nil {
if err == chronograf.ErrUpstreamTimeout {
msg := "Timeout waiting for Influx response"
Error(w, http.StatusRequestTimeout, msg)
Error(w, http.StatusRequestTimeout, msg, h.Logger)
return
}
// TODO: Here I want to return the error code from influx.
Error(w, http.StatusBadRequest, err.Error())
Error(w, http.StatusBadRequest, err.Error(), h.Logger)
return
}
@ -75,33 +75,33 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) {
func (h *Service) KapacitorProxy(w http.ResponseWriter, r *http.Request) {
srcID, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
id, err := paramID("kid", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
path := r.URL.Query().Get("path")
if path == "" {
Error(w, http.StatusUnprocessableEntity, "path query parameter required")
Error(w, http.StatusUnprocessableEntity, "path query parameter required", h.Logger)
return
}
ctx := r.Context()
srv, err := h.ServersStore.Get(ctx, id)
if err != nil || srv.SrcID != srcID {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
u, err := url.Parse(srv.URL)
if err != nil {
msg := fmt.Sprintf("Error parsing kapacitor url: %v", err)
Error(w, http.StatusUnprocessableEntity, msg)
Error(w, http.StatusUnprocessableEntity, msg, h.Logger)
return
}

View File

@ -11,6 +11,7 @@ type getRoutesResponse struct {
Mappings string `json:"mappings"` // Location of the application mappings endpoint
Sources string `json:"sources"` // Location of the sources endpoint
Users string `json:"users"` // Location of the users endpoint
Me string `json:"me"` // Location of the me endpoint
}
// AllRoutes returns all top level routes within chronograf
@ -19,6 +20,7 @@ func AllRoutes(logger chronograf.Logger) http.HandlerFunc {
Sources: "/chronograf/v1/sources",
Layouts: "/chronograf/v1/layouts",
Users: "/chronograf/v1/users",
Me: "/chronograf/v1/me",
Mappings: "/chronograf/v1/mappings",
}

View File

@ -47,6 +47,7 @@ type Server struct {
handler http.Handler
}
// BuildInfo is sent to the usage client to track versions and commits
type BuildInfo struct {
Version string
Commit string
@ -59,7 +60,7 @@ func (s *Server) useAuth() bool {
// Serve starts and runs the chronograf server
func (s *Server) Serve() error {
logger := clog.New(clog.ParseLevel(s.LogLevel))
service := openService(s.BoltPath, s.CannedPath, logger)
service := openService(s.BoltPath, s.CannedPath, logger, s.useAuth())
s.handler = NewMux(MuxOpts{
Develop: s.Develop,
TokenSecret: s.TokenSecret,
@ -105,7 +106,7 @@ func (s *Server) Serve() error {
return nil
}
func openService(boltPath, cannedPath string, logger chronograf.Logger) Service {
func openService(boltPath, cannedPath string, logger chronograf.Logger, useAuth bool) Service {
db := bolt.NewClient()
db.Path = boltPath
if err := db.Open(); err != nil {
@ -136,11 +137,14 @@ func openService(boltPath, cannedPath string, logger chronograf.Logger) Service
ExplorationStore: db.ExplorationStore,
SourcesStore: db.SourcesStore,
ServersStore: db.ServersStore,
UsersStore: db.UsersStore,
TimeSeries: &influx.Client{
Logger: logger,
},
LayoutStore: layouts,
AlertRulesStore: db.AlertsStore,
Logger: logger,
UseAuth: useAuth,
}
}

View File

@ -9,6 +9,14 @@ type Service struct {
ServersStore chronograf.ServersStore
LayoutStore chronograf.LayoutStore
AlertRulesStore chronograf.AlertRulesStore
UsersStore chronograf.UsersStore
TimeSeries chronograf.TimeSeries
Logger chronograf.Logger
UseAuth bool
}
// ErrorMessage is the error response format for all service errors
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}

View File

@ -21,6 +21,11 @@ type sourceResponse struct {
}
func newSourceResponse(src chronograf.Source) sourceResponse {
// If telegraf is not set, we'll set it to the default value.
if src.Telegraf == "" {
src.Telegraf = "telegraf"
}
httpAPISrcs := "/chronograf/v1/sources"
return sourceResponse{
Source: src,
@ -36,18 +41,24 @@ func newSourceResponse(src chronograf.Source) sourceResponse {
func (h *Service) NewSource(w http.ResponseWriter, r *http.Request) {
var src chronograf.Source
if err := json.NewDecoder(r.Body).Decode(&src); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
if err := ValidSourceRequest(src); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
// By default the telegraf database will be telegraf
if src.Telegraf == "" {
src.Telegraf = "telegraf"
}
var err error
if src, err = h.SourcesStore.Add(r.Context(), src); err != nil {
msg := fmt.Errorf("Error storing source %v: %v", src, err)
unknownErrorWithMessage(w, msg)
unknownErrorWithMessage(w, msg, h.Logger)
return
}
@ -65,7 +76,7 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
srcs, err := h.SourcesStore.All(ctx)
if err != nil {
Error(w, http.StatusInternalServerError, "Error loading sources")
Error(w, http.StatusInternalServerError, "Error loading sources", h.Logger)
return
}
@ -84,14 +95,14 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) {
func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id)
if err != nil {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
@ -103,14 +114,14 @@ func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) {
func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
src := chronograf.Source{ID: id}
ctx := r.Context()
if err = h.SourcesStore.Delete(ctx, src); err != nil {
unknownErrorWithMessage(w, err)
unknownErrorWithMessage(w, err, h.Logger)
return
}
@ -121,20 +132,20 @@ func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) {
func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) {
id, err := paramID("id", r)
if err != nil {
Error(w, http.StatusUnprocessableEntity, err.Error())
Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
return
}
ctx := r.Context()
src, err := h.SourcesStore.Get(ctx, id)
if err != nil {
notFound(w, id)
notFound(w, id, h.Logger)
return
}
var req chronograf.Source
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
invalidJSON(w)
invalidJSON(w, h.Logger)
return
}
@ -154,15 +165,18 @@ func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) {
if req.Type != "" {
src.Type = req.Type
}
if req.Telegraf != "" {
src.Telegraf = req.Telegraf
}
if err := ValidSourceRequest(src); err != nil {
invalidData(w, err)
invalidData(w, err, h.Logger)
return
}
if err := h.SourcesStore.Update(ctx, src); err != nil {
msg := fmt.Sprintf("Error updating source ID %d", id)
Error(w, http.StatusInternalServerError, msg)
Error(w, http.StatusInternalServerError, msg, h.Logger)
return
}
encodeJSON(w, http.StatusOK, newSourceResponse(src), h.Logger)

58
server/sources_test.go Normal file
View File

@ -0,0 +1,58 @@
package server
import (
"reflect"
"testing"
"github.com/influxdata/chronograf"
)
// Test_newSourceResponse verifies that newSourceResponse fills in the
// default telegraf database when none is set, leaves an explicit value
// alone, and builds the expected HATEOAS links for the source.
func Test_newSourceResponse(t *testing.T) {
	cases := []struct {
		name string
		src  chronograf.Source
		want sourceResponse
	}{
		{
			name: "Test empty telegraf",
			src: chronograf.Source{
				ID:       1,
				Telegraf: "",
			},
			want: sourceResponse{
				Source: chronograf.Source{
					ID:       1,
					Telegraf: "telegraf",
				},
				Links: sourceLinks{
					Self:       "/chronograf/v1/sources/1",
					Proxy:      "/chronograf/v1/sources/1/proxy",
					Kapacitors: "/chronograf/v1/sources/1/kapacitors",
				},
			},
		},
		{
			name: "Test non-default telegraf",
			src: chronograf.Source{
				ID:       1,
				Telegraf: "howdy",
			},
			want: sourceResponse{
				Source: chronograf.Source{
					ID:       1,
					Telegraf: "howdy",
				},
				Links: sourceLinks{
					Self:       "/chronograf/v1/sources/1",
					Proxy:      "/chronograf/v1/sources/1/proxy",
					Kapacitors: "/chronograf/v1/sources/1/kapacitors",
				},
			},
		},
	}

	for _, tc := range cases {
		got := newSourceResponse(tc.src)
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%q. newSourceResponse() = %v, want %v", tc.name, got, tc.want)
		}
	}
}

File diff suppressed because it is too large Load Diff

180
server/users.go Normal file
View File

@ -0,0 +1,180 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"golang.org/x/net/context"
"github.com/influxdata/chronograf"
)
// userLinks are the HATEOAS links embedded in every user response.
type userLinks struct {
	Self         string `json:"self"`         // Self link mapping to this resource
	Explorations string `json:"explorations"` // URL for explorations endpoint
}

// userResponse is the JSON envelope for a user: the user's own fields
// (embedded and flattened by encoding/json) plus its links.
type userResponse struct {
	*chronograf.User
	Links userLinks `json:"links"`
}
// newUserResponse wraps a user with the HATEOAS links for the users API.
func newUserResponse(usr *chronograf.User) userResponse {
	self := fmt.Sprintf("/chronograf/v1/users/%d", usr.ID)
	return userResponse{
		User: usr,
		Links: userLinks{
			Self:         self,
			Explorations: self + "/explorations",
		},
	}
}
// NewUser adds a new valid user to the store. The request body is JSON;
// on success the created user is returned with a Location header.
func (h *Service) NewUser(w http.ResponseWriter, r *http.Request) {
	// Decode into a value, not a nil pointer: json.Decode into a nil
	// *chronograf.User always fails with "json: Unmarshal(nil ...)".
	var req chronograf.User
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := ValidUserRequest(&req); err != nil {
		invalidData(w, err, h.Logger)
		return
	}

	usr, err := h.UsersStore.Add(r.Context(), &req)
	if err != nil {
		// Format the request value, not *usr: usr may be nil when Add fails.
		msg := fmt.Errorf("error storing user %v: %v", req, err)
		unknownErrorWithMessage(w, msg, h.Logger)
		return
	}

	res := newUserResponse(usr)
	w.Header().Add("Location", res.Links.Self)
	encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// UserID retrieves a single user by the :id path parameter.
func (h *Service) UserID(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}

	ctx := r.Context()
	user, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
	if err != nil {
		notFound(w, id, h.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newUserResponse(user), h.Logger)
}
// RemoveUser deletes the user identified by the :id path parameter and
// responds with 204 No Content on success.
func (h *Service) RemoveUser(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}

	target := &chronograf.User{ID: chronograf.UserID(id)}
	if err := h.UsersStore.Delete(r.Context(), target); err != nil {
		unknownErrorWithMessage(w, err, h.Logger)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
// UpdateUser handles incremental updates of an existing user. Only the
// Email field is taken from the JSON request body; the user must already
// exist in the store.
func (h *Service) UpdateUser(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}

	ctx := r.Context()
	user, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
	if err != nil {
		notFound(w, id, h.Logger)
		return
	}

	var update chronograf.User
	if err := json.NewDecoder(r.Body).Decode(&update); err != nil {
		invalidJSON(w, h.Logger)
		return
	}

	user.Email = update.Email
	if err := ValidUserRequest(user); err != nil {
		invalidData(w, err, h.Logger)
		return
	}

	if err := h.UsersStore.Update(ctx, user); err != nil {
		Error(w, http.StatusInternalServerError, fmt.Sprintf("Error updating user ID %d", id), h.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newUserResponse(user), h.Logger)
}
// ValidUserRequest checks that the user has a non-empty email address,
// the only field required to create or update a user.
func ValidUserRequest(s *chronograf.User) error {
	if s.Email != "" {
		return nil
	}
	return fmt.Errorf("Email required")
}
// getEmail extracts the authenticated principal's email from the request
// context. It returns an error when no principal is present.
func getEmail(ctx context.Context) (string, error) {
	// Use the comma-ok form: a bare assertion panics when the context has
	// no principal (e.g. the auth middleware was not installed) or holds a
	// value of another type.
	principal, ok := ctx.Value(chronograf.PrincipalKey).(chronograf.Principal)
	if !ok || principal == "" {
		return "", fmt.Errorf("Token not found")
	}
	return string(principal), nil
}
// Me returns the currently authenticated user, creating the user record on
// first login (find-or-create keyed by the email in the request context).
// When authentication is disabled it responds with 418, which the UI treats
// as "auth not configured".
func (h *Service) Me(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	if !h.UseAuth {
		// The frontend keys off the 418 status code, not this message.
		Error(w, http.StatusTeapot, "Authentication is disabled", h.Logger)
		return
	}

	email, err := getEmail(ctx)
	if err != nil {
		invalidData(w, err, h.Logger)
		return
	}

	usr, err := h.UsersStore.FindByEmail(ctx, email)
	if err == nil {
		encodeJSON(w, http.StatusOK, newUserResponse(usr), h.Logger)
		return
	}

	// No user found for this email; create one.
	user := &chronograf.User{
		Email: email,
	}
	user, err = h.UsersStore.Add(ctx, user)
	if err != nil {
		msg := fmt.Errorf("error storing user %v: %v", user, err)
		unknownErrorWithMessage(w, msg, h.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newUserResponse(user), h.Logger)
}

View File

@ -1,68 +0,0 @@
import PermissionsTable from 'src/shared/components/PermissionsTable';
import React from 'react';
import {shallow} from 'enzyme';
import sinon from 'sinon';
describe('Shared.Components.PermissionsTable', function() {
it('renders a row for each permission', function() {
const permissions = [
{name: 'ViewChronograf', displayName: 'View Chronograf', description: 'Can use Chronograf tools', resources: ['db1']},
{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']},
];
const wrapper = shallow(
<PermissionsTable
permissions={permissions}
showAddResource={true}
onRemovePermission={sinon.spy()}
/>
);
expect(wrapper.find('tr').length).to.equal(2);
expect(wrapper.find('table').text()).to.match(/View Chronograf/);
expect(wrapper.find('table').text()).to.match(/db1/);
expect(wrapper.find('table').text()).to.match(/Read/);
expect(wrapper.find('table').text()).to.match(/All Databases/);
});
it('only renders the control to add a resource when specified', function() {
const wrapper = shallow(
<PermissionsTable
permissions={[{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']}]}
showAddResource={false}
onRemovePermission={sinon.spy()}
/>
);
expect(wrapper.find('.pill-add').length).to.equal(0);
});
it('only renders the "Remove" control when a callback is provided', function() {
const wrapper = shallow(
<PermissionsTable
permissions={[{name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']}]}
showAddResource={true}
/>
);
expect(wrapper.find('.remove-permission').length).to.equal(0);
});
describe('when a user clicks "Remove"', function() {
it('fires a callback', function() {
const permission = {name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']};
const cb = sinon.spy();
const wrapper = shallow(
<PermissionsTable
permissions={[permission]}
showAddResource={false}
onRemovePermission={cb}
/>
);
wrapper.find('button[children="Remove"]').at(0).simulate('click');
expect(cb.calledWith(permission)).to.be.true;
});
});
});

View File

@ -0,0 +1,88 @@
import timeSeriesToDygraph from 'src/utils/timeSeriesToDygraph';
describe('timeSeriesToDygraph', () => {
  it('parses a raw InfluxDB response into a dygraph friendly data format', () => {
    // Two result sets for the same measurement with different fields and
    // partially overlapping timestamps.
    const response = [
      {
        "response":
        {
          "results": [
            {
              "series": [
                {
                  "name":"m1",
                  "columns": ["time","f1"],
                  "values": [[1000, 1],[2000, 2]],
                },
              ]
            },
            {
              "series": [
                {
                  "name":"m1",
                  "columns": ["time","f2"],
                  "values": [[2000, 3],[4000, 4]],
                },
              ]
            },
          ],
        },
      }
    ];

    const result = timeSeriesToDygraph(response);

    // Rows are merged on timestamp; a field with no point at a given
    // timestamp is filled with null.
    expect(result).to.deep.equal({
      fields: [
        'time',
        'm1.f1',
        'm1.f2',
      ],
      timeSeries: [
        [new Date(1000), 1, null],
        [new Date(2000), 2, 3],
        [new Date(4000), null, 4],
      ],
    });
  });

  it('can sort numerical timestamps correctly', () => {
    // Timestamps arrive out of order and must be sorted numerically,
    // not lexicographically.
    const response = [
      {
        "response":
        {
          "results": [
            {
              "series": [
                {
                  "name":"m1",
                  "columns": ["time","f1"],
                  "values": [[100, 1],[3000, 3],[200, 2]],
                },
              ]
            },
          ],
        },
      }
    ];

    const result = timeSeriesToDygraph(response);

    expect(result).to.deep.equal({
      fields: [
        'time',
        'm1.f1',
      ],
      timeSeries: [
        [new Date(100), 1],
        [new Date(200), 2],
        [new Date(3000), 3],
      ],
    });
  });
});

View File

@ -53,7 +53,7 @@ const CheckSources = React.createClass({
const {isFetching, sources} = nextState;
const source = sources.find((s) => s.id === params.sourceID);
if (!isFetching && !source) {
return router.push(`/?redirectPath=${location.pathname}`);
return router.push(`/sources/new?redirectPath=${location.pathname}`);
}
if (!isFetching && !location.pathname.includes("/manage-sources")) {

View File

@ -1,9 +1,9 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
import AlertsTable from '../components/AlertsTable';
import {getAlerts} from '../apis';
import AJAX from 'utils/ajax';
import _ from 'lodash';
import NoKapacitorError from '../../shared/components/NoKapacitorError';
// Kevin: because we were getting strange errors saying
// "Failed prop type: Required prop `source` was not specified in `AlertsApp`."
@ -83,16 +83,10 @@ const AlertsApp = React.createClass({
const {source} = this.props;
if (this.state.hasKapacitor) {
component = (
<AlertsTable source={this.props.source} alerts={this.state.alerts} />
<AlertsTable source={source} alerts={this.state.alerts} />
);
} else {
const path = `/sources/${source.id}/kapacitor-config`;
component = (
<div>
<p>The current source does not have an associated Kapacitor instance, please configure one.</p>
<Link to={path}>Add Kapacitor</Link>
</div>
);
component = <NoKapacitorError source={source} />;
}
}
return component;

12
ui/src/auth/Login.js Normal file
View File

@ -0,0 +1,12 @@
import React from 'react';
import {withRouter} from 'react-router';
const Login = React.createClass({
render() {
return (
<a className="btn btn-primary" href="/oauth/github">Click me to log in</a>
);
},
});
export default withRouter(Login);

2
ui/src/auth/index.js Normal file
View File

@ -0,0 +1,2 @@
import Login from './Login';
export {Login};

View File

@ -2,11 +2,11 @@ import {proxy} from 'utils/queryUrlGenerator';
import AJAX from 'utils/ajax';
import _ from 'lodash';
export function getCpuAndLoadForHosts(proxyLink) {
export function getCpuAndLoadForHosts(proxyLink, telegrafDB) {
return proxy({
source: proxyLink,
query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "telegraf".."system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
db: 'telegraf',
query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
db: telegrafDB,
}).then((resp) => {
const hosts = {};
const precision = 100;
@ -51,13 +51,13 @@ export function getMappings() {
});
}
export function getAppsForHosts(proxyLink, hosts, appMappings) {
export function getAppsForHosts(proxyLink, hosts, appMappings, telegrafDB) {
const measurements = appMappings.map((m) => `^${m.measurement}$`).join('|');
const measurementsToApps = _.zipObject(appMappings.map(m => m.measurement), appMappings.map(m => m.name));
return proxy({
source: proxyLink,
query: `show series from /${measurements}/`,
db: 'telegraf',
db: telegrafDB,
}).then((resp) => {
const newHosts = Object.assign({}, hosts);
const allSeries = _.get(resp, ['data', 'results', '0', 'series', '0', 'values'], []);
@ -81,3 +81,28 @@ export function getAppsForHosts(proxyLink, hosts, appMappings) {
return newHosts;
});
}
// Fetch the measurement names a given host reports into the source's
// telegraf database. Resolves to an empty array when the host has no
// series or the query errored.
export function getMeasurementsForHost(source, host) {
  return proxy({
    source: source.links.proxy,
    query: `SHOW MEASUREMENTS WHERE "host" = '${host}'`,
    db: source.telegraf,
  }).then(({data}) => {
    if (_isEmpty(data) || _hasError(data)) {
      return [];
    }

    const [series] = data.results[0].series;
    return series.values.map(([measurement]) => measurement);
  });
}
// True when the proxy response carries no series data. Guards the whole
// access chain so a response with no `results` (or an empty results array)
// reads as empty instead of throwing a TypeError.
function _isEmpty(resp) {
  const result = resp.results && resp.results[0];
  return !result || !result.series;
}
// True when the first result in the proxy response carries an InfluxDB
// error. Guards against a missing/empty `results` array so a malformed
// response does not throw.
function _hasError(resp) {
  const result = resp.results && resp.results[0];
  return Boolean(result && result.error);
}

View File

@ -2,7 +2,7 @@ import React, {PropTypes} from 'react';
import LayoutRenderer from 'shared/components/LayoutRenderer';
import TimeRangeDropdown from '../../shared/components/TimeRangeDropdown';
import timeRanges from 'hson!../../shared/data/timeRanges.hson';
import {getMappings, getAppsForHosts} from '../apis';
import {getMappings, getAppsForHosts, getMeasurementsForHost} from 'src/hosts/apis';
import {fetchLayouts} from 'shared/apis';
export const HostPage = React.createClass({
@ -11,6 +11,7 @@ export const HostPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
telegraf: PropTypes.string.isRequired,
}),
params: PropTypes.shape({
hostID: PropTypes.string.isRequired,
@ -32,21 +33,25 @@ export const HostPage = React.createClass({
},
componentDidMount() {
const hosts = {[this.props.params.hostID]: {name: this.props.params.hostID}};
const {source, params} = this.props;
const hosts = {[params.hostID]: {name: params.hostID}};
// fetching layouts and mappings can be done at the same time
fetchLayouts().then(({data: {layouts}}) => {
getMappings().then(({data: {mappings}}) => {
getAppsForHosts(this.props.source.links.proxy, hosts, mappings).then((newHosts) => {
const host = newHosts[this.props.params.hostID];
const filteredLayouts = layouts.filter((layout) => {
const focusedApp = this.props.location.query.app;
if (focusedApp) {
return layout.app === focusedApp;
}
return host.apps && host.apps.includes(layout.app);
getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => {
getMeasurementsForHost(source, params.hostID).then((measurements) => {
const host = newHosts[this.props.params.hostID];
const filteredLayouts = layouts.filter((layout) => {
const focusedApp = this.props.location.query.app;
if (focusedApp) {
return layout.app === focusedApp;
}
return host.apps && host.apps.includes(layout.app) && measurements.includes(layout.measurement);
});
this.setState({layouts: filteredLayouts});
});
this.setState({layouts: filteredLayouts});
});
});
});
@ -60,7 +65,7 @@ export const HostPage = React.createClass({
renderLayouts(layouts) {
const autoRefreshMs = 15000;
const {timeRange} = this.state;
const source = this.props.source.links.proxy;
const {source} = this.props;
let layoutCells = [];
layouts.forEach((layout) => {
@ -70,7 +75,7 @@ export const HostPage = React.createClass({
layoutCells.forEach((cell, i) => {
cell.queries.forEach((q) => {
q.text = q.query;
q.database = q.db;
q.database = source.telegraf;
});
cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers
cell.y = 0;
@ -81,7 +86,7 @@ export const HostPage = React.createClass({
timeRange={timeRange}
cells={layoutCells}
autoRefreshMs={autoRefreshMs}
source={source}
source={source.links.proxy}
host={this.props.params.hostID}
/>
);

View File

@ -12,6 +12,7 @@ export const HostsPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
telegraf: PropTypes.string.isRequired,
}),
addFlashMessage: PropTypes.func,
},
@ -25,11 +26,11 @@ export const HostsPage = React.createClass({
componentDidMount() {
const {source, addFlashMessage} = this.props;
Promise.all([
getCpuAndLoadForHosts(source.links.proxy),
getCpuAndLoadForHosts(source.links.proxy, source.telegraf),
getMappings(),
]).then(([hosts, {data: {mappings}}]) => {
this.setState({hosts});
getAppsForHosts(source.links.proxy, hosts, mappings).then((newHosts) => {
getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => {
this.setState({hosts: newHosts});
}).catch(() => {
addFlashMessage({type: 'error', text: 'Unable to get apps for hosts'});

View File

@ -1,25 +1,24 @@
import React, {PropTypes} from 'react';
import React from 'react';
import {render} from 'react-dom';
import {Provider} from 'react-redux';
import {Router, Route, browserHistory} from 'react-router';
import {Router, Route, browserHistory, Redirect} from 'react-router';
import App from 'src/App';
import AlertsApp from 'src/alerts';
import CheckSources from 'src/CheckSources';
import {HostsPage, HostPage} from 'src/hosts';
import {KubernetesPage} from 'src/kubernetes';
import {Login} from 'src/auth';
import {KapacitorPage, KapacitorRulePage, KapacitorRulesPage, KapacitorTasksPage} from 'src/kapacitor';
import DataExplorer from 'src/chronograf';
import {CreateSource, SourceForm, ManageSources} from 'src/sources';
import NotFound from 'src/shared/components/NotFound';
import NoClusterError from 'src/shared/components/NoClusterError';
import configureStore from 'src/store/configureStore';
import {getSources} from 'shared/apis';
import {getMe, getSources} from 'shared/apis';
import {receiveMe} from 'shared/actions/me';
import 'src/style/enterprise_style/application.scss';
const {number, shape, string, bool} = PropTypes;
const defaultTimeRange = {upper: null, lower: 'now() - 15m'};
const lsTimeRange = window.localStorage.getItem('timeRange');
const parsedTimeRange = JSON.parse(lsTimeRange) || {};
@ -28,38 +27,15 @@ const timeRange = Object.assign(defaultTimeRange, parsedTimeRange);
const store = configureStore({timeRange});
const rootNode = document.getElementById('react-root');
const HTTP_SERVER_ERROR = 500;
const Root = React.createClass({
getInitialState() {
return {
me: {
id: 1,
name: 'Chronograf',
email: 'foo@example.com',
admin: true,
},
isFetching: false,
hasReadPermission: false,
clusterStatus: null,
loggedIn: null,
};
},
childContextTypes: {
me: shape({
id: number.isRequired,
name: string.isRequired,
email: string.isRequired,
admin: bool.isRequired,
}),
componentDidMount() {
this.checkAuth();
},
getChildContext() {
return {
me: this.state.me,
};
},
activeSource(sources) {
const defaultSource = sources.find((s) => s.default);
if (defaultSource && defaultSource.id) {
@ -68,29 +44,53 @@ const Root = React.createClass({
return sources[0];
},
redirectToHosts(_, replace, callback) {
redirectFromRoot(_, replace, callback) {
getSources().then(({data: {sources}}) => {
if (sources && sources.length) {
const path = `/sources/${this.activeSource(sources).id}/hosts`;
replace(path);
}
callback();
}).catch(callback);
});
},
checkAuth() {
if (store.getState().me.links) {
return this.setState({loggedIn: true});
}
getMe().then(({data: me}) => {
store.dispatch(receiveMe(me));
this.setState({loggedIn: true});
}).catch((err) => {
const AUTH_DISABLED = 418;
if (err.response.status === AUTH_DISABLED) {
return this.setState({loggedIn: true});
// Could store a boolean indicating auth is not set up
}
this.setState({loggedIn: false});
});
},
render() {
if (this.state.isFetching) {
return null;
if (this.state.loggedIn === null) {
return <div className="page-spinner"></div>;
}
if (this.state.clusterStatus === HTTP_SERVER_ERROR) {
return <NoClusterError />;
if (this.state.loggedIn === false) {
return (
<Provider store={store}>
<Router history={browserHistory}>
<Route path="/login" component={Login} />
<Redirect from="*" to="/login" />
</Router>
</Provider>
);
}
return (
<Provider store={store}>
<Router history={browserHistory}>
<Route path="/" component={CreateSource} onEnter={this.redirectToHosts} />
<Route path="/" component={CreateSource} onEnter={this.redirectFromRoot} />
<Route path="/sources/new" component={CreateSource} />
<Route path="/sources/:sourceID" component={App}>
<Route component={CheckSources}>
<Route path="manage-sources" component={ManageSources} />

View File

@ -43,16 +43,14 @@ export function loadDefaultRule() {
};
}
export function fetchRules(source) {
export function fetchRules(kapacitor) {
return (dispatch) => {
getKapacitor(source).then((kapacitor) => {
getRules(kapacitor).then(({data: {rules}}) => {
dispatch({
type: 'LOAD_RULES',
payload: {
rules,
},
});
getRules(kapacitor).then(({data: {rules}}) => {
dispatch({
type: 'LOAD_RULES',
payload: {
rules,
},
});
});
};

View File

@ -2,11 +2,14 @@ import React, {PropTypes} from 'react';
import {connect} from 'react-redux';
import {bindActionCreators} from 'redux';
import {Link} from 'react-router';
import * as kapacitorActionCreators from 'src/kapacitor/actions/view';
import {getKapacitor} from 'src/shared/apis';
import * as kapacitorActionCreators from '../actions/view';
import NoKapacitorError from '../../shared/components/NoKapacitorError';
export const KapacitorRulesPage = React.createClass({
propTypes: {
source: PropTypes.shape({
id: PropTypes.string.isRequired,
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
self: PropTypes.string.isRequired,
@ -26,8 +29,20 @@ export const KapacitorRulesPage = React.createClass({
addFlashMessage: PropTypes.func,
},
getInitialState() {
return {
hasKapacitor: false,
loading: true,
};
},
componentDidMount() {
this.props.actions.fetchRules(this.props.source);
getKapacitor(this.props.source).then((kapacitor) => {
if (kapacitor) {
this.props.actions.fetchRules(kapacitor);
}
this.setState({loading: false, hasKapacitor: !!kapacitor});
});
},
handleDeleteRule(rule) {
@ -35,9 +50,45 @@ export const KapacitorRulesPage = React.createClass({
actions.deleteRule(rule);
},
render() {
renderSubComponent() {
const {source} = this.props;
const {hasKapacitor, loading} = this.state;
let component;
if (loading) {
component = (<p>Loading...</p>);
} else if (hasKapacitor) {
component = (
<div className="panel panel-minimal">
<div className="panel-heading u-flex u-ai-center u-jc-space-between">
<h2 className="panel-title">Alert Rules</h2>
<Link to={`/sources/${source.id}/alert-rules/new`} className="btn btn-sm btn-primary">Create New Rule</Link>
</div>
<div className="panel-body">
<table className="table v-center">
<thead>
<tr>
<th>Name</th>
<th>Trigger</th>
<th>Message</th>
<th>Alerts</th>
<th></th>
</tr>
</thead>
<tbody>
{this.renderAlertsTableRows()}
</tbody>
</table>
</div>
</div>
);
} else {
component = <NoKapacitorError source={source} />;
}
return component;
},
render() {
return (
<div className="kapacitor-rules-page">
<div className="chronograf-header">
@ -49,28 +100,7 @@ export const KapacitorRulesPage = React.createClass({
</div>
<div className="hosts-page-scroll-container">
<div className="container-fluid">
<div className="panel panel-minimal">
<div className="panel-heading u-flex u-ai-center u-jc-space-between">
<h2 className="panel-title">Alert Rules</h2>
<Link to={`/sources/${source.id}/alert-rules/new`} className="btn btn-sm btn-primary">Create New Rule</Link>
</div>
<div className="panel-body">
<table className="table v-center">
<thead>
<tr>
<th>Name</th>
<th>Trigger</th>
<th>Message</th>
<th>Alerts</th>
<th></th>
</tr>
</thead>
<tbody>
{this.renderAlertsTableRows()}
</tbody>
</table>
</div>
</div>
{this.renderSubComponent()}
</div>
</div>
</div>

View File

@ -9,6 +9,7 @@ export const KubernetesPage = React.createClass({
links: PropTypes.shape({
proxy: PropTypes.string.isRequired,
}).isRequired,
telegraf: PropTypes.string.isRequired,
}),
layouts: PropTypes.arrayOf(PropTypes.shape().isRequired).isRequired,
},
@ -23,7 +24,7 @@ export const KubernetesPage = React.createClass({
renderLayouts(layouts) {
const autoRefreshMs = 15000;
const {timeRange} = this.state;
const source = this.props.source.links.proxy;
const {source} = this.props;
let layoutCells = [];
layouts.forEach((layout) => {
@ -33,7 +34,7 @@ export const KubernetesPage = React.createClass({
layoutCells.forEach((cell, i) => {
cell.queries.forEach((q) => {
q.text = q.query;
q.database = q.db;
q.database = source.telegraf;
});
cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers
cell.y = 0;
@ -44,7 +45,7 @@ export const KubernetesPage = React.createClass({
timeRange={timeRange}
cells={layoutCells}
autoRefreshMs={autoRefreshMs}
source={source}
source={source.links.proxy}
/>
);
},
@ -66,12 +67,12 @@ export const KubernetesPage = React.createClass({
return (
<div className="host-dashboard hosts-page">
<div className="enterprise-header hosts-dashboard-header">
<div className="enterprise-header__container">
<div className="enterprise-header__left">
<div className="chronograf-header hosts-dashboard-header">
<div className="chronograf-header__container">
<div className="chronograf-header__left">
<h1>Kubernetes Dashboard</h1>
</div>
<div className="enterprise-header__right">
<div className="chronograf-header__right">
<h1>Range:</h1>
<TimeRangeDropdown onChooseTimeRange={this.handleChooseTimeRange} selected={timeRange.inputValue} />
</div>

View File

@ -0,0 +1,14 @@
// Action creator: stores the authenticated user ("me") in the redux store.
export function receiveMe(me) {
  const payload = {me};
  return {type: 'ME_RECEIVED', payload};
}
// Action creator: signals that the current user logged out; the `me`
// reducer resets its state in response.
export function logout() {
  return {type: 'LOGOUT'};
}

View File

@ -7,6 +7,13 @@ export function fetchLayouts() {
});
}
// Fetches the currently authenticated user from the chronograf API.
// Returns the AJAX promise so callers can chain on the response.
export function getMe() {
  return AJAX({
    method: 'GET',
    url: '/chronograf/v1/me',
  });
}
export function getSources() {
return AJAX({
url: '/chronograf/v1/sources',

View File

@ -1,79 +0,0 @@
import React, {PropTypes} from 'react';
const {arrayOf, number, shape, func, string} = PropTypes;
const AddClusterAccounts = React.createClass({
propTypes: {
clusters: arrayOf(shape({
id: number.isRequired,
cluster_users: arrayOf(shape({
name: string.isRequired,
})),
dipslay_name: string,
cluster_id: string.isRequired,
})).isRequired,
onSelectClusterAccount: func.isRequired,
headerText: string,
},
getDefaultProps() {
return {
headerText: 'Pair With Cluster Accounts',
};
},
handleSelectClusterAccount(e, clusterID) {
this.props.onSelectClusterAccount({
clusterID,
accountName: e.target.value,
});
},
render() {
return (
<div>
{
this.props.clusters.map((cluster, i) => {
return (
<div key={i} className="form-grid">
<div className="form-group col-sm-6">
{i === 0 ? <label>Cluster</label> : null}
<div className="form-control-static">
{cluster.display_name || cluster.cluster_id}
</div>
</div>
<div className="form-group col-sm-6">
{i === 0 ? <label>Account</label> : null}
{this.renderClusterUsers(cluster)}
</div>
</div>
);
})
}
</div>
);
},
renderClusterUsers(cluster) {
if (!cluster.cluster_users) {
return (
<select disabled={true} defaultValue="No cluster accounts" className="form-control" id="cluster-account">
<option>No cluster accounts</option>
</select>
);
}
return (
<select onChange={(e) => this.handleSelectClusterAccount(e, cluster.cluster_id)} className="form-control">
<option value="">No Association</option>
{
cluster.cluster_users.map((cu) => {
return <option value={cu.name} key={cu.name}>{cu.name}</option>;
})
}
</select>
);
},
});
export default AddClusterAccounts;

View File

@ -1,124 +0,0 @@
import React, {PropTypes} from 'react';
// Permissions that apply to the entire cluster and therefore cannot be
// limited to a single database (see renderDatabases below).
const CLUSTER_WIDE_PERMISSIONS = ["CreateDatabase", "AddRemoveNode", "ManageShard", "DropDatabase", "CopyShard", "Rebalance"];
// Bootstrap modal for choosing a permission to add and, for database-scoped
// permissions, optionally limiting it to one database.
const AddPermissionModal = React.createClass({
propTypes: {
activeCluster: PropTypes.string.isRequired,
permissions: PropTypes.arrayOf(PropTypes.shape({
name: PropTypes.string.isRequired,
displayName: PropTypes.string.isRequired,
description: PropTypes.string.isRequired,
})),
databases: PropTypes.arrayOf(PropTypes.string.isRequired).isRequired,
// Called on submit with {name, resources: [database]}; an empty-string
// resource means "all databases".
onAddPermission: PropTypes.func.isRequired,
},
getInitialState() {
return {
selectedPermission: null,
selectedDatabase: '',
};
},
// Selecting a new permission resets any previously chosen database.
handlePermissionClick(permission) {
this.setState({
selectedPermission: permission,
selectedDatabase: '',
});
},
handleDatabaseChange(e) {
this.setState({selectedDatabase: e.target.value});
},
handleSubmit(e) {
e.preventDefault();
this.props.onAddPermission({
name: this.state.selectedPermission,
resources: [this.state.selectedDatabase],
});
// Imperatively close the Bootstrap modal via the global jQuery `$`.
$('#addPermissionModal').modal('hide'); // eslint-disable-line no-undef
},
render() {
const {permissions} = this.props;
return (
<div className="modal fade" id="addPermissionModal" tabIndex="-1" role="dialog">
<div className="modal-dialog">
<div className="modal-content">
<div className="modal-header">
<button type="button" className="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">&times;</span>
</button>
<h4 className="modal-title">Select a Permission to Add</h4>
</div>
<form onSubmit={this.handleSubmit}>
<div className="modal-body">
<div className="well permission-list">
<ul>
{permissions.map((perm) => {
return (
<li key={perm.name}>
<input onClick={() => this.handlePermissionClick(perm.name)} type="radio" name="permissionName" value={`${perm.name}`} id={`permission-${perm.name}`}></input>
<label htmlFor={`permission-${perm.name}`}>
{perm.displayName}
<br/>
<span className="permission-description">{perm.description}</span>
</label>
</li>
);
})}
</ul>
</div>
{this.renderOptions()}
</div>
{this.renderFooter()}
</form>
</div>
</div>
</div>
);
},
renderFooter() {
// Submit is disabled until a permission is picked.
return (
<div className="modal-footer">
<button className="btn btn-default" data-dismiss="modal">Cancel</button>
<input disabled={!this.state.selectedPermission} className="btn btn-success" type="submit" value="Add Permission"></input>
</div>
);
},
renderOptions() {
return (
<div>
{this.state.selectedPermission ? this.renderDatabases() : null}
</div>
);
},
renderDatabases() {
// Cluster-wide permissions (and clusters with no databases) get no
// database picker — the permission always applies everywhere.
const isClusterWide = CLUSTER_WIDE_PERMISSIONS.includes(this.state.selectedPermission);
if (!this.props.databases.length || isClusterWide) {
return null;
}
return (
<div>
<div className="form-grid">
<div className="form-group col-md-12">
<label htmlFor="#permissions-database">Limit Permission to...</label>
<select onChange={this.handleDatabaseChange} className="form-control" name="database" id="permissions-database">
<option value={''}>All Databases</option>
{this.props.databases.map((databaseName, i) => <option key={i}>{databaseName}</option>)}
</select>
</div>
</div>
</div>
);
},
});
export default AddPermissionModal;

View File

@ -60,7 +60,7 @@ export default function AutoRefresh(ComposedComponent) {
const newSeries = [];
queries.forEach(({host, database, rp, text}) => {
_fetchTimeSeries(host, database, rp, text).then((resp) => {
newSeries.push({identifier: host, response: resp.data});
newSeries.push({response: resp.data});
count += 1;
if (count === queries.length) {
this.setState({

View File

@ -1,24 +0,0 @@
import React from 'react';
const {node} = React.PropTypes;
const ClusterError = React.createClass({
propTypes: {
children: node.isRequired,
},
render() {
return (
<div className="container-fluid">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
{this.props.children}
</div>
</div>
</div>
</div>
);
},
});
export default ClusterError;

View File

@ -131,6 +131,7 @@ export default React.createClass({
const timeSeries = this.getTimeSeries();
const {fields, yRange} = this.props;
dygraph.updateOptions({
labels: fields,
file: timeSeries,

View File

@ -1,21 +0,0 @@
import React from 'react';
import ClusterError from './ClusterError';
const InsufficientPermissions = React.createClass({
render() {
return (
<ClusterError>
<div className="panel-heading text-center">
<h2 className="deluxe">
{`Your account has insufficient permissions`}
</h2>
</div>
<div className="panel-body text-center">
<h3 className="deluxe">Talk to your admin to get additional permissions for access</h3>
</div>
</ClusterError>
);
},
});
export default InsufficientPermissions;

View File

@ -101,7 +101,7 @@ export const LayoutRenderer = React.createClass({
render() {
const layoutMargin = 4;
return (
<GridLayout layout={this.state.layout} isDraggable={false} isResizable={false} cols={12} rowHeight={83.5} margin={[layoutMargin, layoutMargin]} containerPadding={[0, 0]}>
<GridLayout layout={this.state.layout} isDraggable={false} isResizable={false} cols={12} rowHeight={83.5} margin={[layoutMargin, layoutMargin]} containerPadding={[0, 0]} useCSSTransforms={false} >
{this.generateGraphs()}
</GridLayout>
);

View File

@ -1,35 +0,0 @@
import React from 'react';
// Error copy text is loaded from an hson fixture shared across screens.
import errorCopy from 'hson!shared/copy/errors.hson';
// Full-page error shown when no cluster is reachable; offers a reload
// button so the user can retry once their cluster is back up.
const NoClusterError = React.createClass({
render() {
return (
<div>
<div className="container">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
<div className="panel-heading text-center">
<h2 className="deluxe">
{errorCopy.noCluster.head}
</h2>
</div>
<div className="panel-body text-center">
<h3 className="deluxe">How to resolve:</h3>
<p>
{errorCopy.noCluster.body}
</p>
<div className="text-center">
{/* Full page reload rather than a re-render: re-runs app bootstrapping. */}
<button className="btn btn-center btn-success" onClick={() => window.location.reload()}>My Cluster Is Back Up</button>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
);
},
});
export default NoClusterError;

View File

@ -1,27 +0,0 @@
import React from 'react';
const NoClusterLinksError = React.createClass({
render() {
return (
<div className="container-fluid">
<div className="row">
<div className="col-sm-6 col-sm-offset-3">
<div className="panel panel-error panel-summer">
<div className="panel-heading text-center">
<h2 className="deluxe">
This user is not associated with any cluster accounts!
</h2>
</div>
<div className="panel-body text-center">
<p>Many features in Chronograf require your user to be associated with a cluster account.</p>
<p>Ask an administrator to associate your user with a cluster account.</p>
</div>
</div>
</div>
</div>
</div>
);
},
});
export default NoClusterLinksError;

View File

@ -0,0 +1,22 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
const NoKapacitorError = React.createClass({
propTypes: {
source: PropTypes.shape({
id: PropTypes.string.isRequired,
}).isRequired,
},
render() {
const path = `/sources/${this.props.source.id}/kapacitor-config`;
return (
<div>
<p>The current source does not have an associated Kapacitor instance, please configure one.</p>
<Link to={path}>Add Kapacitor</Link>
</div>
);
},
});
export default NoKapacitorError;

View File

@ -1,76 +0,0 @@
import React, {PropTypes} from 'react';
const {arrayOf, shape, string} = PropTypes;
// Table listing a role's or user's permissions, with the databases each one
// is limited to. Optionally shows an "add resource" affordance and a Remove
// button per row.
const PermissionsTable = React.createClass({
propTypes: {
permissions: PropTypes.arrayOf(shape({
name: string.isRequired,
displayName: string.isRequired,
description: string.isRequired,
// Resource '' (empty string) is rendered as "All Databases".
resources: arrayOf(string.isRequired).isRequired,
})).isRequired,
showAddResource: PropTypes.bool,
// When provided, a Remove button is rendered and invoked with the
// permission object for that row.
onRemovePermission: PropTypes.func,
},
getDefaultProps() {
return {
permissions: [],
showAddResource: false,
};
},
handleAddResourceClick() {
// TODO
},
handleRemovePermission(permission) {
this.props.onRemovePermission(permission);
},
render() {
// Empty state: no permissions to show.
if (!this.props.permissions.length) {
return (
<div className="generic-empty-state">
<span className="icon alert-triangle"></span>
<h4>This Role has no Permissions</h4>
</div>
);
}
return (
<div className="panel-body">
<table className="table permissions-table">
<tbody>
{this.props.permissions.map((p) => (
<tr key={p.name}>
<td>{p.displayName}</td>
<td>
{p.resources.map((resource, i) => <div key={i} className="pill">{resource === '' ? 'All Databases' : resource}</div>)}
{this.props.showAddResource ? (
<div onClick={this.handleAddResourceClick} className="pill-add" data-toggle="modal" data-target="#addPermissionModal">
<span className="icon plus"></span>
</div>
) : null}
</td>
{this.props.onRemovePermission ? (
<td className="remove-permission">
<button
onClick={() => this.handleRemovePermission(p)}
type="button"
className="btn btn-sm btn-link-danger">
Remove
</button>
</td>
) : null}
</tr>
))}
</tbody>
</table>
</div>
);
},
});
export default PermissionsTable;

View File

@ -1,86 +0,0 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
import PermissionsTable from 'src/shared/components/PermissionsTable';
const {arrayOf, bool, func, shape, string} = PropTypes;
// Accordion of panels, one per role, each expanding to that role's
// permissions table. Optionally shows a user count and a Remove button.
const RolePanels = React.createClass({
propTypes: {
roles: arrayOf(shape({
name: string.isRequired,
users: arrayOf(string.isRequired).isRequired,
permissions: arrayOf(shape({
name: string.isRequired,
displayName: string.isRequired,
description: string.isRequired,
resources: arrayOf(string.isRequired).isRequired,
})).isRequired,
})).isRequired,
showUserCount: bool,
// When provided, renders a Remove button per role that opens the
// #removeAccountFromRoleModal and is invoked with the role object.
onRemoveAccountFromRole: func,
},
getDefaultProps() {
return {
showUserCount: false,
};
},
render() {
const {roles} = this.props;
// Empty state: no roles assigned to this user.
if (!roles.length) {
return (
<div className="panel panel-default">
<div className="panel-body">
<div className="generic-empty-state">
<span className="icon alert-triangle"></span>
<h4>This user has no roles</h4>
</div>
</div>
</div>
);
}
return (
<div className="panel-group sub-page" role="tablist">
{roles.map((role) => {
// Strip non-word characters so the role name is a valid DOM id for
// the collapse/anchor pairing below.
const id = role.name.replace(/[^\w]/gi, '');
return (
<div key={role.name} className="panel panel-default">
<div className="panel-heading" role="tab" id={`heading${id}`}>
<h4 className="panel-title u-flex u-ai-center u-jc-space-between">
<a className="collapsed" role="button" data-toggle="collapse" href={`#collapse-role-${id}`}>
<span className="caret"></span>
{role.name}
</a>
<div>
{this.props.showUserCount ? <p>{role.users ? role.users.length : 0} Users</p> : null}
{this.props.onRemoveAccountFromRole ? (
<button
onClick={() => this.props.onRemoveAccountFromRole(role)}
data-toggle="modal"
data-target="#removeAccountFromRoleModal"
type="button"
className="btn btn-sm btn-link">
Remove
</button>
) : null}
<Link to={`/roles/${encodeURIComponent(role.name)}`} className="btn btn-xs btn-link">
Go To Role
</Link>
</div>
</h4>
</div>
<div id={`collapse-role-${id}`} className="panel-collapse collapse" role="tabpanel">
<PermissionsTable permissions={role.permissions} />
</div>
</div>
);
})}
</div>
);
},
});
export default RolePanels;

View File

@ -1,95 +0,0 @@
import React, {PropTypes} from 'react';
import {Link} from 'react-router';
import classNames from 'classnames';
const {func, shape, arrayOf, string} = PropTypes;
// Table of cluster users with profile links, an admin indicator, and a
// delete button. The row for the current user (`me`) gets a "(You)" marker
// and a disabled delete button.
const UsersTable = React.createClass({
propTypes: {
users: arrayOf(shape({}).isRequired).isRequired,
activeCluster: string.isRequired,
// Invoked with {id, name} of the user chosen for deletion; the actual
// delete is confirmed via the #deleteUsersModal.
onUserToDelete: func.isRequired,
me: shape({}).isRequired,
// Label for the delete button, e.g. 'Delete' or 'Remove'.
deleteText: string,
},
getDefaultProps() {
return {
deleteText: 'Delete',
};
},
handleSelectUserToDelete(user) {
this.props.onUserToDelete(user);
},
render() {
const {users, activeCluster, me} = this.props;
// Empty state: no users to list.
if (!users.length) {
return (
<div className="generic-empty-state">
<span className="icon user-outline"/>
<h4>No users</h4>
</div>
);
}
return (
<table className="table v-center users-table">
<tbody>
<tr>
<th></th>
<th>Name</th>
<th>Admin</th>
<th>Email</th>
<th></th>
</tr>
{
users.map((user) => {
const isMe = me.id === user.id;
return (
<tr key={user.id}>
<td></td>
<td>
<span>
<Link to={`/clusters/${activeCluster}/users/${user.id}`} title={`Go to ${user.name}'s profile`}>{user.name}</Link>
{isMe ? <em> (You) </em> : null}
</span>
</td>
<td className="admin-column">{this.renderAdminIcon(user.admin)}</td>
<td>{user.email}</td>
<td>
{this.renderDeleteButton(user)}
</td>
</tr>
);
})
}
</tbody>
</table>
);
},
// Checkmark (green) for admins, X (red) for non-admins.
renderAdminIcon(isAdmin) {
return <span className={classNames("icon", {"checkmark text-color-success": isAdmin, "remove text-color-danger": !isAdmin})}></span>;
},
renderDeleteButton(user) {
// Users cannot delete themselves: render a disabled button instead.
if (this.props.me.id === user.id) {
return <button type="button" className="btn btn-sm btn-link-danger disabled" title={`Cannot ${this.props.deleteText} Yourself`}>{this.props.deleteText}</button>;
}
return (
<button
onClick={() => this.handleSelectUserToDelete({id: user.id, name: user.name})}
type="button"
data-toggle="modal"
data-target="#deleteUsersModal"
className="btn btn-sm btn-link-danger"
>
{this.props.deleteText}
</button>
);
},
});
export default UsersTable;

View File

@ -0,0 +1,7 @@
// Barrel module collecting the app's redux reducers for combineReducers.
import me from './me';
import notifications from './notifications';
export {
me,
notifications,
};

View File

@ -0,0 +1,17 @@
// Initial "me" state: an empty object means no authenticated user.
function getInitialState() {
  return {};
}

const initialState = getInitialState();

// Reducer for the authenticated user. ME_RECEIVED replaces the state with
// the received user object; LOGOUT resets it to empty; anything else leaves
// the state untouched.
export default function me(state = initialState, action) {
  if (action.type === 'ME_RECEIVED') {
    return action.payload.me;
  }
  if (action.type === 'LOGOUT') {
    return {};
  }
  return state;
}

View File

@ -1,20 +1,25 @@
import React, {PropTypes} from 'react';
import {NavBar, NavBlock, NavHeader, NavListItem} from 'src/side_nav/components/NavItems';
const {string} = PropTypes;
const {string, shape} = PropTypes;
const SideNav = React.createClass({
propTypes: {
location: string.isRequired,
sourceID: string.isRequired,
explorationID: string,
me: shape({
email: string.isRequired,
}),
},
render() {
const {location, sourceID, explorationID} = this.props;
const {me, location, sourceID, explorationID} = this.props;
const sourcePrefix = `/sources/${sourceID}`;
const explorationSuffix = explorationID ? `/${explorationID}` : '';
const dataExplorerLink = `${sourcePrefix}/chronograf/data-explorer${explorationSuffix}`;
const loggedIn = !!(me && me.email);
return (
<NavBar location={location}>
<div className="sidebar__logo">
@ -39,6 +44,11 @@ const SideNav = React.createClass({
<NavListItem link={`${sourcePrefix}/manage-sources`}>InfluxDB</NavListItem>
<NavListItem link={`${sourcePrefix}/kapacitor-config`}>Kapacitor</NavListItem>
</NavBlock>
{loggedIn ? (
<NavBlock icon="user-outline" className="sidebar__square-last">
<a className="sidebar__menu-item" href="/oauth/logout">Logout</a>
</NavBlock>
) : null}
</NavBar>
);
},

View File

@ -1,41 +1,38 @@
import React, {PropTypes} from 'react';
import {connect} from 'react-redux';
import SideNav from '../components/SideNav';
const {func, string} = PropTypes;
const {func, string, shape} = PropTypes;
const SideNavApp = React.createClass({
propTypes: {
currentLocation: string.isRequired,
addFlashMessage: func.isRequired,
sourceID: string.isRequired,
explorationID: string,
},
contextTypes: {
canViewChronograf: PropTypes.bool,
},
getInitialState() {
return {
clusters: [],
clusterToUpdate: '',
};
me: shape({
email: string.isRequired,
}),
},
render() {
const {currentLocation, sourceID, explorationID} = this.props;
const {canViewChronograf} = this.context;
const {me, currentLocation, sourceID, explorationID} = this.props;
return (
<SideNav
sourceID={sourceID}
isAdmin={true}
canViewChronograf={canViewChronograf}
location={currentLocation}
explorationID={explorationID}
me={me}
/>
);
},
});
export default SideNavApp;
function mapStateToProps(state) {
return {
me: state.me,
};
}
export default connect(mapStateToProps)(SideNavApp);

View File

@ -22,6 +22,7 @@ export const CreateSource = React.createClass({
username: this.sourceUser.value,
password: this.sourcePassword.value,
isDefault: true,
telegraf: this.sourceTelegraf.value,
};
createSource(source).then(({data: sourceFromServer}) => {
this.redirectToApp(sourceFromServer);
@ -71,7 +72,10 @@ export const CreateSource = React.createClass({
<input ref={(r) => this.sourcePassword = r} className="form-control" id="password" type="password"></input>
</div>
</div>
<div className="form-group col-xs-8 col-xs-offset-2">
<label htmlFor="telegraf">Telegraf database</label>
<input ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" type="text" value="telegraf"></input>
</div>
<div className="form-group col-xs-12 text-center">
<button className="btn btn-success" type="submit">Create New Server</button>
</div>

View File

@ -44,6 +44,7 @@ export const SourceForm = React.createClass({
username: this.sourceUsername.value,
password: this.sourcePassword.value,
'default': this.sourceDefault.checked,
telegraf: this.sourceTelegraf.value,
});
if (this.state.editMode) {
updateSource(newSource).then(() => {
@ -117,6 +118,10 @@ export const SourceForm = React.createClass({
<label htmlFor="password">Password</label>
<input type="password" name="password" ref={(r) => this.sourcePassword = r} className="form-control" id="password" onChange={this.onInputChange} value={source.password || ''}></input>
</div>
<div className="form-group col-xs-8 col-xs-offset-2">
<label htmlFor="telegraf">Telegraf database</label>
<input type="text" name="telegraf" ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" onChange={this.onInputChange} value={source.telegraf || 'telegraf'}></input>
</div>
<div className="form-group col-xs-8 col-xs-offset-2">
<div className="form-control-static">
<input type="checkbox" id="defaultSourceCheckbox" defaultChecked={source.default} ref={(r) => this.sourceDefault = r} />

Some files were not shown because too many files have changed in this diff Show More