diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd3fe8926..58626f50a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,7 @@
 ## v1.1.0 [unreleased]
+- #586: Allow telegraf database in non-default locations
+- #576: Fix broken zoom on graphs that aren't the first.
 - #575: Add Varnish Layout
 - #574: Fix broken graphs on Postgres Layouts by adding aggregates.
diff --git a/bolt/client.go b/bolt/client.go
index 93f32d900..7f8c7f5e9 100644
--- a/bolt/client.go
+++ b/bolt/client.go
@@ -19,6 +19,7 @@ type Client struct {
 	SourcesStore *SourcesStore
 	ServersStore *ServersStore
 	LayoutStore  *LayoutStore
+	UsersStore   *UsersStore
 	AlertsStore  *AlertsStore
 }
@@ -28,6 +29,7 @@ func NewClient() *Client {
 	c.SourcesStore = &SourcesStore{client: c}
 	c.ServersStore = &ServersStore{client: c}
 	c.AlertsStore = &AlertsStore{client: c}
+	c.UsersStore = &UsersStore{client: c}
 	c.LayoutStore = &LayoutStore{
 		client: c,
 		IDs:    &uuid.V4{},
@@ -65,6 +67,10 @@ func (c *Client) Open() error {
 		if _, err := tx.CreateBucketIfNotExists(AlertsBucket); err != nil {
 			return err
 		}
+		// Always create the Users bucket.
+		if _, err := tx.CreateBucketIfNotExists(UsersBucket); err != nil {
+			return err
+		}
 		return nil
 	}); err != nil {
 		return err
diff --git a/bolt/internal/internal.go b/bolt/internal/internal.go
index 2e6bbfb0c..67469f52b 100644
--- a/bolt/internal/internal.go
+++ b/bolt/internal/internal.go
@@ -51,6 +51,7 @@ func MarshalSource(s chronograf.Source) ([]byte, error) {
 		Password: s.Password,
 		URL:      s.URL,
 		Default:  s.Default,
+		Telegraf: s.Telegraf,
 	})
 }
@@ -68,6 +69,7 @@ func UnmarshalSource(data []byte, s *chronograf.Source) error {
 	s.Password = pb.Password
 	s.URL = pb.URL
 	s.Default = pb.Default
+	s.Telegraf = pb.Telegraf
 	return nil
 }
@@ -203,3 +205,24 @@ func UnmarshalAlertRule(data []byte, r *ScopedAlert) error {
 	r.KapaID = int(pb.KapaID)
 	return nil
 }
+
+// MarshalUser encodes a user to binary protobuf format.
+func MarshalUser(u *chronograf.User) ([]byte, error) {
+	return proto.Marshal(&User{
+		ID:    uint64(u.ID),
+		Email: u.Email,
+	})
+}
+
+// UnmarshalUser decodes a user from binary protobuf data.
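+// Fields absent from the data are decoded as their zero values.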
+func UnmarshalUser(data []byte, u *chronograf.User) error { + var pb User + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + u.ID = chronograf.UserID(pb.ID) + u.Email = pb.Email + return nil +} diff --git a/bolt/internal/internal.pb.go b/bolt/internal/internal.pb.go index a4aee6b59..eaeb1404e 100644 --- a/bolt/internal/internal.pb.go +++ b/bolt/internal/internal.pb.go @@ -16,6 +16,7 @@ It has these top-level messages: Cell Query AlertRule + User */ package internal @@ -35,13 +36,13 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Exploration struct { - ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"` - UserID int64 `protobuf:"varint,3,opt,name=UserID,json=userID,proto3" json:"UserID,omitempty"` - Data string `protobuf:"bytes,4,opt,name=Data,json=data,proto3" json:"Data,omitempty"` - CreatedAt int64 `protobuf:"varint,5,opt,name=CreatedAt,json=createdAt,proto3" json:"CreatedAt,omitempty"` - UpdatedAt int64 `protobuf:"varint,6,opt,name=UpdatedAt,json=updatedAt,proto3" json:"UpdatedAt,omitempty"` - Default bool `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"` + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + UserID int64 `protobuf:"varint,3,opt,name=UserID,proto3" json:"UserID,omitempty"` + Data string `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"` + CreatedAt int64 `protobuf:"varint,5,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"` + UpdatedAt int64 `protobuf:"varint,6,opt,name=UpdatedAt,proto3" json:"UpdatedAt,omitempty"` + Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"` } func (m *Exploration) Reset() { *m = Exploration{} } @@ -50,13 +51,14 @@ func (*Exploration) ProtoMessage() {} func (*Exploration) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } type Source struct { - ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"` - Type string `protobuf:"bytes,3,opt,name=Type,json=type,proto3" json:"Type,omitempty"` - Username string `protobuf:"bytes,4,opt,name=Username,json=username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,5,opt,name=Password,json=password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,6,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"` - Default bool `protobuf:"varint,7,opt,name=Default,json=default,proto3" json:"Default,omitempty"` + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + Type string `protobuf:"bytes,3,opt,name=Type,proto3" json:"Type,omitempty"` + Username string `protobuf:"bytes,4,opt,name=Username,proto3" json:"Username,omitempty"` + Password string `protobuf:"bytes,5,opt,name=Password,proto3" json:"Password,omitempty"` + URL string `protobuf:"bytes,6,opt,name=URL,proto3" json:"URL,omitempty"` + Default bool `protobuf:"varint,7,opt,name=Default,proto3" json:"Default,omitempty"` + Telegraf string `protobuf:"bytes,8,opt,name=Telegraf,proto3" json:"Telegraf,omitempty"` } func (m *Source) Reset() { *m = Source{} } @@ -65,12 +67,12 @@ func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, 
[]int) { return fileDescriptorInternal, []int{1} } type Server struct { - ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,json=name,proto3" json:"Name,omitempty"` - Username string `protobuf:"bytes,3,opt,name=Username,json=username,proto3" json:"Username,omitempty"` - Password string `protobuf:"bytes,4,opt,name=Password,json=password,proto3" json:"Password,omitempty"` - URL string `protobuf:"bytes,5,opt,name=URL,json=uRL,proto3" json:"URL,omitempty"` - SrcID int64 `protobuf:"varint,6,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"` + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + Username string `protobuf:"bytes,3,opt,name=Username,proto3" json:"Username,omitempty"` + Password string `protobuf:"bytes,4,opt,name=Password,proto3" json:"Password,omitempty"` + URL string `protobuf:"bytes,5,opt,name=URL,proto3" json:"URL,omitempty"` + SrcID int64 `protobuf:"varint,6,opt,name=SrcID,proto3" json:"SrcID,omitempty"` } func (m *Server) Reset() { *m = Server{} } @@ -79,10 +81,10 @@ func (*Server) ProtoMessage() {} func (*Server) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} } type Layout struct { - ID string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - Application string `protobuf:"bytes,2,opt,name=Application,json=application,proto3" json:"Application,omitempty"` - Measurement string `protobuf:"bytes,3,opt,name=Measurement,json=measurement,proto3" json:"Measurement,omitempty"` - Cells []*Cell `protobuf:"bytes,4,rep,name=Cells,json=cells" json:"Cells,omitempty"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Application string `protobuf:"bytes,2,opt,name=Application,proto3" json:"Application,omitempty"` + Measurement string `protobuf:"bytes,3,opt,name=Measurement,proto3" json:"Measurement,omitempty"` + Cells []*Cell `protobuf:"bytes,4,rep,name=Cells" json:"Cells,omitempty"` } func (m *Layout) Reset() { *m = Layout{} } @@ -120,11 +122,11 @@ func (m *Cell) GetQueries() []*Query { } type Query struct { - Command string `protobuf:"bytes,1,opt,name=Command,json=command,proto3" json:"Command,omitempty"` - DB string `protobuf:"bytes,2,opt,name=DB,json=dB,proto3" json:"DB,omitempty"` - RP string `protobuf:"bytes,3,opt,name=RP,json=rP,proto3" json:"RP,omitempty"` - GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys,json=groupBys" json:"GroupBys,omitempty"` - Wheres []string `protobuf:"bytes,5,rep,name=Wheres,json=wheres" json:"Wheres,omitempty"` + Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"` + DB string `protobuf:"bytes,2,opt,name=DB,proto3" json:"DB,omitempty"` + RP string `protobuf:"bytes,3,opt,name=RP,proto3" json:"RP,omitempty"` + GroupBys []string `protobuf:"bytes,4,rep,name=GroupBys" json:"GroupBys,omitempty"` + Wheres []string `protobuf:"bytes,5,rep,name=Wheres" json:"Wheres,omitempty"` } func (m *Query) Reset() { *m = Query{} } @@ -133,10 +135,10 @@ func (*Query) ProtoMessage() {} func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} } type AlertRule struct { - ID string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - JSON string `protobuf:"bytes,2,opt,name=JSON,json=jSON,proto3" json:"JSON,omitempty"` - SrcID int64 `protobuf:"varint,3,opt,name=SrcID,json=srcID,proto3" json:"SrcID,omitempty"` - KapaID int64 
`protobuf:"varint,4,opt,name=KapaID,json=kapaID,proto3" json:"KapaID,omitempty"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + JSON string `protobuf:"bytes,2,opt,name=JSON,proto3" json:"JSON,omitempty"` + SrcID int64 `protobuf:"varint,3,opt,name=SrcID,proto3" json:"SrcID,omitempty"` + KapaID int64 `protobuf:"varint,4,opt,name=KapaID,proto3" json:"KapaID,omitempty"` } func (m *AlertRule) Reset() { *m = AlertRule{} } @@ -144,6 +146,16 @@ func (m *AlertRule) String() string { return proto.CompactTextString( func (*AlertRule) ProtoMessage() {} func (*AlertRule) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} } +type User struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Email string `protobuf:"bytes,2,opt,name=Email,proto3" json:"Email,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} } + func init() { proto.RegisterType((*Exploration)(nil), "internal.Exploration") proto.RegisterType((*Source)(nil), "internal.Source") @@ -152,44 +164,45 @@ func init() { proto.RegisterType((*Cell)(nil), "internal.Cell") proto.RegisterType((*Query)(nil), "internal.Query") proto.RegisterType((*AlertRule)(nil), "internal.AlertRule") + proto.RegisterType((*User)(nil), "internal.User") } func init() { proto.RegisterFile("internal.proto", fileDescriptorInternal) } var fileDescriptorInternal = []byte{ - // 529 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x93, 0x4d, 0xae, 0xd3, 0x30, - 0x10, 0x80, 0xe5, 0x26, 0xce, 0x8f, 0x8b, 0x0a, 0xb2, 0x10, 0x8a, 0x10, 0x8b, 0x2a, 0x62, 0x51, - 0x36, 0x6f, 0x01, 0x27, 0x68, 0x1b, 0x84, 0x0a, 0xa5, 0xaf, 0xb8, 0x54, 0xac, 0x58, 0x98, 0xc4, - 0xd0, 0x40, 0xfe, 0x70, 0x6c, 0xda, 0x6c, 0xd9, 0xc2, 0x31, 0xb8, 0x01, 0x17, 0x44, 0xe3, 0x3a, - 0xa4, 0x12, 0xe8, 0xe9, 0x2d, 0xbf, 0x99, 0x49, 0xfc, 0xcd, 0x8c, 0x4d, 0x26, 0x79, 0xa5, 0x84, - 0xac, 0x78, 0x71, 0xd5, 0xc8, 0x5a, 0xd5, 0x34, 0xe8, 0x39, 0xfe, 0x8d, 0xc8, 0xf8, 0xf9, 0xa9, - 0x29, 0x6a, 0xc9, 0x55, 0x5e, 0x57, 0x74, 0x42, 0x46, 0xab, 0x24, 0x42, 0x53, 0x34, 0x73, 0xd8, - 0x28, 0x4f, 0x28, 0x25, 0xee, 0x86, 0x97, 0x22, 0x1a, 0x4d, 0xd1, 0x2c, 0x64, 0x6e, 0xc5, 0x4b, - 0x41, 0x1f, 0x10, 0x6f, 0xdf, 0x0a, 0xb9, 0x4a, 0x22, 0xc7, 0xd4, 0x79, 0xda, 0x10, 0xd4, 0x26, - 0x5c, 0xf1, 0xc8, 0x3d, 0xd7, 0x66, 0x5c, 0x71, 0xfa, 0x88, 0x84, 0x4b, 0x29, 0xb8, 0x12, 0xd9, - 0x5c, 0x45, 0xd8, 0x94, 0x87, 0x69, 0x1f, 0x80, 0xec, 0xbe, 0xc9, 0x6c, 0xd6, 0x3b, 0x67, 0x75, - 0x1f, 0xa0, 0x11, 0xf1, 0x13, 0xf1, 0x91, 0xeb, 0x42, 0x45, 0xfe, 0x14, 0xcd, 0x02, 0xe6, 0x67, - 0x67, 0x8c, 0x7f, 0x21, 0xe2, 0xed, 0x6a, 0x2d, 0x53, 0x71, 0x2b, 0x61, 0x4a, 0xdc, 0xb7, 0x5d, - 0x23, 0x8c, 0x6e, 0xc8, 0x5c, 0xd5, 0x35, 0x82, 0x3e, 0x24, 0x01, 0x34, 0x01, 0x79, 0x2b, 0x1c, - 0x68, 0xcb, 0x90, 0xdb, 0xf2, 0xb6, 0x3d, 0xd6, 0x32, 0x33, 0xce, 0x21, 0x0b, 0x1a, 0xcb, 0xf4, - 0x1e, 0x71, 0xf6, 0x6c, 0x6d, 0x64, 0x43, 0xe6, 0x68, 0xb6, 0xbe, 0x41, 0xf3, 0x27, 0x68, 0x0a, - 0xf9, 0x4d, 0xc8, 0x5b, 0x69, 0x5e, 0x2a, 0x39, 0x37, 0x28, 0xb9, 0xff, 0x57, 0xc2, 0x83, 0xd2, - 0x7d, 0x82, 0x77, 0x32, 0x5d, 0x25, 0x76, 0xa6, 0xb8, 0x05, 0x88, 0xbf, 0x23, 0xe2, 0xad, 0x79, - 0x57, 0x6b, 0x75, 0xa1, 0x13, 0x1a, 0x9d, 0x29, 0x19, 0xcf, 0x9b, 0xa6, 0xc8, 0x53, 0x73, 0x0b, - 0xac, 0xd5, 0x98, 0x0f, 0x21, 0xa8, 0x78, 0x2d, 0x78, 0xab, 0xa5, 
0x28, 0x45, 0xa5, 0xac, 0xdf, - 0xb8, 0x1c, 0x42, 0xf4, 0x31, 0xc1, 0x4b, 0x51, 0x14, 0x6d, 0xe4, 0x4e, 0x9d, 0xd9, 0xf8, 0xe9, - 0xe4, 0xea, 0xef, 0xa5, 0x83, 0x30, 0xc3, 0x29, 0x24, 0xe3, 0x1f, 0x88, 0xb8, 0xc0, 0xf4, 0x0e, - 0x41, 0x27, 0x63, 0x80, 0x19, 0x3a, 0x01, 0x75, 0xe6, 0x58, 0xcc, 0x50, 0x07, 0x74, 0x34, 0x47, - 0x60, 0x86, 0x8e, 0x40, 0x07, 0xd3, 0x34, 0x66, 0xe8, 0x40, 0x9f, 0x10, 0xff, 0xab, 0x16, 0x32, - 0x17, 0x6d, 0x84, 0xcd, 0x41, 0x77, 0x87, 0x83, 0xde, 0x68, 0x21, 0x3b, 0xd6, 0xe7, 0xe1, 0xc3, - 0xdc, 0x6e, 0x0a, 0xe5, 0x30, 0x72, 0x33, 0x5a, 0x7f, 0x18, 0x79, 0xac, 0x09, 0x36, 0xdf, 0xc0, - 0x12, 0x97, 0x75, 0x59, 0xf2, 0x2a, 0xb3, 0x53, 0xf1, 0xd3, 0x33, 0xc2, 0xa8, 0x92, 0x85, 0x9d, - 0xc8, 0x28, 0x5b, 0x00, 0xb3, 0xad, 0xed, 0x7f, 0x24, 0xb7, 0xb0, 0x99, 0x17, 0xb2, 0xd6, 0xcd, - 0xa2, 0x3b, 0x77, 0x1e, 0xb2, 0xe0, 0x93, 0x65, 0x78, 0x29, 0xef, 0x0e, 0x42, 0x5a, 0xd5, 0x90, - 0x79, 0x47, 0x43, 0xf1, 0x7b, 0x12, 0xce, 0x0b, 0x21, 0x15, 0xd3, 0x85, 0xf8, 0x67, 0x17, 0x94, - 0xb8, 0x2f, 0x77, 0xd7, 0x9b, 0xfe, 0x6a, 0x7c, 0xde, 0x5d, 0x6f, 0x86, 0x85, 0x3a, 0x17, 0x0b, - 0x85, 0xdf, 0xbf, 0xe2, 0x0d, 0x5f, 0x25, 0x66, 0x3a, 0x0e, 0xf3, 0xbe, 0x18, 0xfa, 0xe0, 0x99, - 0x57, 0xfe, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x02, 0xe9, 0x30, 0xf7, 0x03, 0x00, - 0x00, + // 541 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0x4b, 0x8e, 0xd3, 0x4c, + 0x10, 0xc7, 0xd5, 0xb1, 0x3b, 0x89, 0x2b, 0x9f, 0xf2, 0xa1, 0xd6, 0x08, 0x59, 0x88, 0x45, 0x64, + 0xb1, 0x08, 0x12, 0x9a, 0x05, 0x9c, 0x20, 0x89, 0x47, 0x28, 0x30, 0x0c, 0xa1, 0x33, 0x11, 0x2b, + 0x16, 0x4d, 0x52, 0x43, 0x2c, 0x39, 0xb6, 0x69, 0xdb, 0x24, 0xde, 0xb2, 0x85, 0xdb, 0x70, 0x01, + 0x8e, 0x86, 0xaa, 0xdd, 0x76, 0x2c, 0xf1, 0xd0, 0xec, 0xea, 0x5f, 0x55, 0xae, 0xfe, 0xd5, 0x23, + 0x81, 0x71, 0x94, 0x14, 0xa8, 0x13, 0x15, 0x5f, 0x66, 0x3a, 0x2d, 0x52, 0x31, 0x6c, 0x74, 0xf0, + 0x83, 0xc1, 0xe8, 0xea, 0x94, 0xc5, 0xa9, 0x56, 0x45, 0x94, 0x26, 0x62, 0x0c, 0xbd, 0x65, 0xe8, + 0xb3, 0x09, 0x9b, 0x3a, 0xb2, 0xb7, 0x0c, 0x85, 0x00, 0xf7, 0x46, 0x1d, 0xd0, 0xef, 0x4d, 0xd8, + 0xd4, 0x93, 0xc6, 0x16, 0x0f, 0xa1, 0xbf, 0xc9, 0x51, 0x2f, 0x43, 0xdf, 0x31, 0x79, 0x56, 0x51, + 0x6e, 0xa8, 0x0a, 0xe5, 0xbb, 0x75, 0x2e, 0xd9, 0xe2, 0x31, 0x78, 0x0b, 0x8d, 0xaa, 0xc0, 0xdd, + 0xac, 0xf0, 0xb9, 0x49, 0x3f, 0x3b, 0x28, 0xba, 0xc9, 0x76, 0x36, 0xda, 0xaf, 0xa3, 0xad, 0x43, + 0xf8, 0x30, 0x08, 0xf1, 0x4e, 0x95, 0x71, 0xe1, 0x0f, 0x26, 0x6c, 0x3a, 0x94, 0x8d, 0x0c, 0x7e, + 0x32, 0xe8, 0xaf, 0xd3, 0x52, 0x6f, 0xf1, 0x5e, 0xc0, 0x02, 0xdc, 0xdb, 0x2a, 0x43, 0x83, 0xeb, + 0x49, 0x63, 0x8b, 0x47, 0x30, 0x24, 0xec, 0x84, 0x72, 0x6b, 0xe0, 0x56, 0x53, 0x6c, 0xa5, 0xf2, + 0xfc, 0x98, 0xea, 0x9d, 0x61, 0xf6, 0x64, 0xab, 0xc5, 0x03, 0x70, 0x36, 0xf2, 0xda, 0xc0, 0x7a, + 0x92, 0xcc, 0xbf, 0x63, 0x52, 0x9d, 0x5b, 0x8c, 0xf1, 0x93, 0x56, 0x77, 0xfe, 0xb0, 0xae, 0xd3, + 0xe8, 0xe0, 0x3b, 0xb5, 0x80, 0xfa, 0x0b, 0xea, 0x7b, 0xb5, 0xd0, 0xc5, 0x75, 0xfe, 0x81, 0xeb, + 0xfe, 0x19, 0x97, 0x9f, 0x71, 0x2f, 0x80, 0xaf, 0xf5, 0x76, 0x19, 0xda, 0x79, 0xd7, 0x22, 0xf8, + 0xca, 0xa0, 0x7f, 0xad, 0xaa, 0xb4, 0x2c, 0x3a, 0x38, 0x9e, 0xc1, 0x99, 0xc0, 0x68, 0x96, 0x65, + 0x71, 0xb4, 0x35, 0x17, 0x62, 0xa9, 0xba, 0x2e, 0xca, 0x78, 0x83, 0x2a, 0x2f, 0x35, 0x1e, 0x30, + 0x29, 0x2c, 0x5f, 0xd7, 0x25, 0x9e, 0x00, 0x5f, 0x60, 0x1c, 0xe7, 0xbe, 0x3b, 0x71, 0xa6, 0xa3, + 0xe7, 0xe3, 0xcb, 0xf6, 0x20, 0xc9, 0x2d, 0xeb, 0x60, 0xf0, 0x8d, 0x81, 0x4b, 0x96, 0xf8, 0x0f, + 0xd8, 0xc9, 0x10, 0x70, 0xc9, 0x4e, 
0xa4, 0x2a, 0xf3, 0x2c, 0x97, 0xac, 0x22, 0x75, 0x34, 0x4f,
+	0x70, 0xc9, 0x8e, 0xa4, 0xf6, 0xa6, 0x69, 0x2e, 0xd9, 0x5e, 0x3c, 0x85, 0xc1, 0xe7, 0x12, 0x75,
+	0x84, 0xb9, 0xcf, 0xcd, 0x43, 0xff, 0x9f, 0x1f, 0x7a, 0x57, 0xa2, 0xae, 0x64, 0x13, 0xa7, 0x0f,
+	0x23, 0xbb, 0x45, 0x16, 0xd1, 0xc8, 0xcd, 0x68, 0x07, 0xf5, 0xc8, 0xc9, 0x0e, 0x4a, 0xe0, 0xe6,
+	0x1b, 0x5a, 0xf0, 0x22, 0x3d, 0x1c, 0x54, 0xb2, 0xb3, 0x53, 0x69, 0x24, 0x8d, 0x2a, 0x9c, 0xdb,
+	0x89, 0xf4, 0xc2, 0x39, 0x69, 0xb9, 0xb2, 0xfd, 0xf7, 0xe4, 0x8a, 0x36, 0xf3, 0x52, 0xa7, 0x65,
+	0x36, 0xaf, 0xea, 0xce, 0x3d, 0xd9, 0x6a, 0xfa, 0x15, 0xbd, 0xdf, 0xa3, 0xb6, 0xa8, 0x9e, 0xb4,
+	0x2a, 0xf8, 0x00, 0xde, 0x2c, 0x46, 0x5d, 0xc8, 0x32, 0xc6, 0xdf, 0x76, 0x21, 0xc0, 0x7d, 0xb5,
+	0x7e, 0x7b, 0xd3, 0x9c, 0x06, 0xd9, 0xe7, 0x85, 0x3a, 0x9d, 0x85, 0x52, 0xf9, 0xd7, 0x2a, 0x53,
+	0xcb, 0xd0, 0x4c, 0xc7, 0x91, 0x56, 0x05, 0xcf, 0xc0, 0xa5, 0xc3, 0xe9, 0x54, 0x76, 0x4d, 0xe5,
+	0x0b, 0xe0, 0x57, 0x07, 0x15, 0xc5, 0xb6, 0x74, 0x2d, 0x3e, 0xf6, 0xcd, 0xff, 0xc5, 0x8b, 0x5f,
+	0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x2c, 0x30, 0x90, 0x41, 0x04, 0x00, 0x00,
 }
diff --git a/bolt/internal/internal.proto b/bolt/internal/internal.proto
index 189db0b6a..213858f50 100644
--- a/bolt/internal/internal.proto
+++ b/bolt/internal/internal.proto
@@ -2,32 +2,33 @@ syntax = "proto3";
 package internal;
 
 message Exploration {
-  int64 ID = 1; // ExplorationID is a unique ID for an Exploration.
-  string Name = 2; // User provided name of the Exploration.
-  int64 UserID = 3; // UserID is the owner of this Exploration.
-  string Data = 4; // Opaque blob of JSON data.
-  int64 CreatedAt = 5; // Time the exploration was first created.
-  int64 UpdatedAt = 6; // Latest time the exploration was updated.
-  bool Default = 7; // Flags an exploration as the default.
+  int64 ID = 1;        // ExplorationID is a unique ID for an Exploration.
+  string Name = 2;     // User provided name of the Exploration.
+  int64 UserID = 3;    // UserID is the owner of this Exploration.
+  string Data = 4;     // Opaque blob of JSON data.
+  int64 CreatedAt = 5; // Time the exploration was first created.
+  int64 UpdatedAt = 6; // Latest time the exploration was updated.
+  bool Default = 7;    // Flags an exploration as the default.
 }
 
 message Source {
-  int64 ID = 1; // ID is the unique ID of the source
-  string Name = 2; // Name is the user-defined name for the source
-  string Type = 3; // Type specifies which kinds of source (enterprise vs oss)
-  string Username = 4; // Username is the username to connect to the source
-  string Password = 5;
-  string URL = 6; // URL are the connections to the source
-  bool Default = 7; // Flags an exploration as the default.
+  int64 ID = 1;        // ID is the unique ID of the source
+  string Name = 2;     // Name is the user-defined name for the source
+  string Type = 3;     // Type specifies which kind of source (enterprise vs oss)
+  string Username = 4; // Username is the username to connect to the source
+  string Password = 5;
+  string URL = 6;      // URL is the connection URL for the source
+  bool Default = 7;    // Flags a source as the default.
+  string Telegraf = 8; // Telegraf is the database telegraf is written to. By default it is "telegraf".
 }
 
 message Server {
-  int64 ID = 1; // ID is the unique ID of the server
-  string Name = 2; // Name is the user-defined name for the server
-  string Username = 3; // Username is the username to connect to the server
-  string Password = 4;
-  string URL = 5; // URL is the path to the server
-  int64 SrcID = 6; // SrcID is the ID of the data source
+  int64 ID = 1;        // ID is the unique ID of the server
+  string Name = 2;     // Name is the user-defined name for the server
+  string Username = 3; // Username is the username to connect to the server
+  string Password = 4;
+  string URL = 5;      // URL is the path to the server
+  int64 SrcID = 6;     // SrcID is the ID of the data source
 }
 
 message Layout {
@@ -38,7 +39,7 @@
 message Cell {
-  int32 x = 1; // X-coordinate of Cell in the Layout
+  int32 x = 1;    // X-coordinate of Cell in the Layout
   int32 y = 2; // Y-coordinate of Cell in the Layout
   int32 w = 3; // Width of Cell in the Layout
   int32 h = 4; // Height of Cell in the Layout
@@ -61,3 +62,8 @@ message AlertRule {
   int64 SrcID = 3; // SrcID is the id of the source this alert is associated with
   int64 KapaID = 4; // KapaID is the id of the kapacitor this alert is associated with
 }
+
+message User {
+  uint64 ID = 1;    // ID is the unique ID of this user
+  string Email = 2; // Email is the email address of the user
+}
diff --git a/bolt/internal/internal_test.go b/bolt/internal/internal_test.go
index e0d321996..ebf1bbafe 100644
--- a/bolt/internal/internal_test.go
+++ b/bolt/internal/internal_test.go
@@ -39,6 +39,7 @@ func TestMarshalSource(t *testing.T) {
 		Password: "1 point twenty-one g1g@w@tts",
 		URL:      "http://twin-pines.mall.io:8086",
 		Default:  true,
+		Telegraf: "telegraf",
 	}
 
 	var vv chronograf.Source
diff --git a/bolt/users.go b/bolt/users.go
new file mode 100644
index 000000000..b2376c7b9
--- /dev/null
+++ b/bolt/users.go
@@ -0,0 +1,133 @@
+package bolt
+
+import (
+	"context"
+
+	"github.com/boltdb/bolt"
+	"github.com/influxdata/chronograf"
+	"github.com/influxdata/chronograf/bolt/internal"
+)
+
+// Ensure UsersStore implements chronograf.UsersStore.
+var _ chronograf.UsersStore = &UsersStore{}
+
+// UsersBucket is the bolt bucket used to store user information.
+var UsersBucket = []byte("Users")
+
+// UsersStore uses bolt to store and retrieve users.
+type UsersStore struct {
+	client *Client
+}
+
+// FindByEmail searches the UsersStore for a user with the given email.
+func (s *UsersStore) FindByEmail(ctx context.Context, email string) (*chronograf.User, error) {
+	var user chronograf.User
+	err := s.client.db.View(func(tx *bolt.Tx) error {
+		err := tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error {
+			var u chronograf.User
+			if err := internal.UnmarshalUser(v, &u); err != nil {
+				return err
+			} else if u.Email != email {
+				return nil
+			}
+			user.Email = u.Email
+			user.ID = u.ID
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		if user.ID == 0 {
+			return chronograf.ErrUserNotFound
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &user, nil
+}
+
+// Add creates a new User in the UsersStore.
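+// IDs are assigned from the bucket's monotonically increasing NextSequence
+// counter; any ID already set on u is overwritten.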
+func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
+	if err := s.client.db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket(UsersBucket)
+		seq, err := b.NextSequence()
+		if err != nil {
+			return err
+		}
+		u.ID = chronograf.UserID(seq)
+
+		if v, err := internal.MarshalUser(u); err != nil {
+			return err
+		} else if err := b.Put(itob(int(u.ID)), v); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// Delete removes the user from the UsersStore.
+func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error {
+	if err := s.client.db.Update(func(tx *bolt.Tx) error {
+		if err := tx.Bucket(UsersBucket).Delete(itob(int(u.ID))); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Get retrieves a user by id.
+func (s *UsersStore) Get(ctx context.Context, id chronograf.UserID) (*chronograf.User, error) {
+	var u chronograf.User
+	if err := s.client.db.View(func(tx *bolt.Tx) error {
+		if v := tx.Bucket(UsersBucket).Get(itob(int(id))); v == nil {
+			return chronograf.ErrUserNotFound
+		} else if err := internal.UnmarshalUser(v, &u); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return &u, nil
+}
+
+// Update a user's email address in the UsersStore.
+func (s *UsersStore) Update(ctx context.Context, usr *chronograf.User) error {
+	if err := s.client.db.Update(func(tx *bolt.Tx) error {
+		// Retrieve an existing user with the same ID.
+		var u chronograf.User
+		b := tx.Bucket(UsersBucket)
+		if v := b.Get(itob(int(usr.ID))); v == nil {
+			return chronograf.ErrUserNotFound
+		} else if err := internal.UnmarshalUser(v, &u); err != nil {
+			return err
+		}
+
+		u.Email = usr.Email
+
+		if v, err := internal.MarshalUser(&u); err != nil {
+			return err
+		} else if err := b.Put(itob(int(u.ID)), v); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/canned/apache.json b/canned/apache.json
index cb7f9d201..6debd955d 100644
--- a/canned/apache.json
+++ b/canned/apache.json
@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"BytesPerSec\")) AS \"bytes_per_sec\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -32,8 +30,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"ReqPerSec\")) AS \"req_per_sec\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
@@ -51,8 +47,6 @@
       "queries": [
         {
           "query": "SELECT non_negative_derivative(max(\"TotalAccesses\")) AS \"tot_access\" FROM apache",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"server\""
           ],
diff --git a/canned/consul.json b/canned/consul.json
index a64f96ceb..399919598 100644
--- a/canned/consul.json
+++ b/canned/consul.json
@@ -13,8 +13,6 @@
       "queries": [
         {
           "query": "SELECT count(\"check_id\") as \"Number Critical\" FROM consul_health_checks",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"service_name\""
           ],
@@ -34,8 +32,6 @@
       "queries": [
         {
           "query": "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks",
-          "db": "telegraf",
-          "rp": "",
           "groupbys": [
             "\"service_name\""
           ],
diff --git a/canned/cpu.json b/canned/cpu.json
index af775c8a9..f72023cf4 100644
--- a/canned/cpu.json
+++ b/canned/cpu.json
@@ -12,9 +12,7 @@
       "name": "CPU Usage",
       "queries": [
         {
-          "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"telegraf\"..\"cpu\"",
-          "db": "telegraf",
-          "rp": "",
+          "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"",
"groupbys": [], "wheres": [] } diff --git a/canned/disk.json b/canned/disk.json index fbe2b5422..186637799 100644 --- a/canned/disk.json +++ b/canned/disk.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM disk", - "db": "telegraf", - "rp": "", "groupbys": [ "\"path\"" ], diff --git a/canned/docker.json b/canned/docker.json index d68ff4b3b..d056f57d8 100644 --- a/canned/docker.json +++ b/canned/docker.json @@ -1,36 +1,41 @@ - { - "id": "0e980b97-c162-487b-a815-3f955df6243f", - "measurement": "docker", - "app": "docker", - "cells": [{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef22", - "name": "Docker - Container CPU", - "queries": [{ - "query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \"docker_container_cpu\"", - "db": "telegraf", - "rp": "", - "groupbys": ["\"container_name\""], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4c79cefb-5152-410c-9b88-74f9bff7ef00", - "name": "Docker - Container Memory", - "queries": [{ - "query": "SELECT mean(\"usage\") AS \"usage\" FROM \"docker_container_mem\"", - "db": "telegraf", - "rp": "", - "groupbys": ["\"container_name\""], - "wheres": [] - }] - }] - - } +{ + "id": "0e980b97-c162-487b-a815-3f955df6243f", + "measurement": "docker", + "app": "docker", + "cells": [ + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "4c79cefb-5152-410c-9b88-74f9bff7ef22", + "name": "Docker - Container CPU", + "queries": [ + { + "query": "SELECT mean(\"usage_percent\") AS \"usage_percent\" FROM \"docker_container_cpu\"", + "groupbys": [ + "\"container_name\"" + ], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "4c79cefb-5152-410c-9b88-74f9bff7ef00", + "name": "Docker - Container Memory", + "queries": [ + { + "query": "SELECT mean(\"usage\") AS \"usage\" FROM \"docker_container_mem\"", + "groupbys": [ + "\"container_name\"" + ], + "wheres": [] + } + ] + } + ] +} diff --git a/canned/elasticsearch.json b/canned/elasticsearch.json index d9a4e0b49..69986242a 100644 --- a/canned/elasticsearch.json +++ b/canned/elasticsearch.json @@ -13,8 +13,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(search_query_total)) as searches_per_min, non_negative_derivative(mean(search_scroll_total)) as scrolls_per_min, non_negative_derivative(mean(search_fetch_total)) as fetches_per_min, non_negative_derivative(mean(search_suggest_total)) as suggests_per_min from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -30,8 +28,6 @@ "queries": [ { "query": "select mean(current_open) from elasticsearch_http", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -47,8 +43,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(search_query_time_in_millis)) as mean, non_negative_derivative(median(search_query_time_in_millis)) as median, non_negative_derivative(percentile(search_query_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -64,8 +58,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(search_fetch_time_in_millis)) as mean, non_negative_derivative(median(search_fetch_time_in_millis)) as median, non_negative_derivative(percentile(search_fetch_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -81,8 +73,6 @@ "queries": [ { "query": "select 
non_negative_derivative(mean(search_suggest_time_in_millis)) as mean, non_negative_derivative(median(search_suggest_time_in_millis)) as median, non_negative_derivative(percentile(search_suggest_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -98,8 +88,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(search_scroll_time_in_millis)) as mean, non_negative_derivative(median(search_scroll_time_in_millis)) as median, non_negative_derivative(percentile(search_scroll_time_in_millis, 95)) as ninety_fifth from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -115,8 +103,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(indexing_index_time_in_millis)) as mean from elasticsearch_indices", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -132,8 +118,6 @@ "queries": [ { "query": "select mean(gc_collectors_old_collection_count) as old_count, mean(gc_collectors_young_collection_count) as young_count from elasticsearch_jvm", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -149,8 +133,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(gc_collectors_old_collection_time_in_millis)) as mean_old_time, non_negative_derivative(mean(gc_collectors_young_collection_time_in_millis)) as mean_young_time from elasticsearch_jvm", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -166,8 +148,6 @@ "queries": [ { "query": "select mean(mem_heap_used_percent) from elasticsearch_jvm", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/haproxy.json b/canned/haproxy.json index 9648f81d2..9e2ebef62 100644 --- a/canned/haproxy.json +++ b/canned/haproxy.json @@ -13,8 +13,6 @@ "queries": [ { "query": "select mean(\"active_servers\") AS active_servers, mean(\"backup_servers\") AS backup_servers FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -30,8 +28,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(last(\"http_response.2xx\"), 1s) AS \"2xx\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -47,8 +43,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(last(\"http_response.4xx\"), 1s) AS \"4xx\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -64,8 +58,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(last(\"http_response.5xx\"), 1s) AS \"5xx\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -81,8 +73,6 @@ "queries": [ { "query": "SELECT mean(\"req_rate\") AS \"requests_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -98,8 +88,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"rate\")) AS \"sessions_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -115,8 +103,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"scur\")) / non_negative_derivative(max(\"slim\")) * 100 AS \"session_usage_percent\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -132,8 +118,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"dreq\")) AS \"denials_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -149,8 +133,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"ereq\")) AS \"errors_per_second\" FROM haproxy", - "db": 
"telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -166,15 +148,11 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"bin\")) AS \"bytes_in_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] }, { "query": "SELECT non_negative_derivative(max(\"bout\")) AS \"bytes_out_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -190,8 +168,6 @@ "queries": [ { "query": "SELECT max(\"rtime\") AS \"response_time\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -207,8 +183,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"econ\")) AS \"errors_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -224,8 +198,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"qcur\")) AS \"queued_per_second\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -241,8 +213,6 @@ "queries": [ { "query": "SELECT max(\"qtime\") AS \"queue_time\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -258,8 +228,6 @@ "queries": [ { "query": "SELECT max(\"eresp\") AS \"error_response_rate\" FROM haproxy", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/influxdb_database.json b/canned/influxdb_database.json index 5bd03464f..91e16b266 100644 --- a/canned/influxdb_database.json +++ b/canned/influxdb_database.json @@ -13,18 +13,12 @@ "queries": [ { "query": "SELECT max(\"numMeasurements\") AS \"measurements\" FROM \"influxdb_database\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] }, { "query": "SELECT max(\"numSeries\") AS \"series\" FROM \"influxdb_database\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] diff --git a/canned/influxdb_httpd.json b/canned/influxdb_httpd.json index e5d434256..2158fa1eb 100644 --- a/canned/influxdb_httpd.json +++ b/canned/influxdb_httpd.json @@ -13,10 +13,7 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"writeReq\"), 1s) AS \"http_requests\" FROM \"influxdb_httpd\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] @@ -31,10 +28,7 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"queryReq\"), 1s) AS \"query_requests\" FROM \"influxdb_httpd\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] @@ -49,18 +43,12 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"clientError\"), 1s) AS \"client_errors\" FROM \"influxdb_httpd\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] }, { "query": "SELECT non_negative_derivative(max(\"authFail\"), 1s) AS \"auth_fail\" FROM \"influxdb_httpd\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] diff --git a/canned/influxdb_queryExecutor.json b/canned/influxdb_queryExecutor.json index fef1d8f90..867c7706d 100644 --- a/canned/influxdb_queryExecutor.json +++ b/canned/influxdb_queryExecutor.json @@ -13,18 +13,12 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"queryDurationNs\"), 1s) AS \"duration\" FROM \"influxdb_queryExecutor\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] }, { "query": "SELECT non_negative_derivative(max(\"queriesExecuted\"), 1s) AS \"queries_executed\" FROM \"influxdb_queryExecutor\"", - "db": "telegraf", - "rp": "", - 
"groupbys": [ - ], + "groupbys": [], "wheres": [] } ] diff --git a/canned/influxdb_write.json b/canned/influxdb_write.json index 821019675..e1845955f 100644 --- a/canned/influxdb_write.json +++ b/canned/influxdb_write.json @@ -13,10 +13,7 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"pointReq\"), 1s) AS \"points_written\" FROM \"influxdb_write\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] @@ -31,18 +28,12 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"writeError\"), 1s) AS \"shard_write_error\" FROM \"influxdb_write\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] }, { "query": "SELECT non_negative_derivative(max(\"serveError\"), 1s) AS \"http_error\" FROM \"influxdb_httpd\"", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] diff --git a/canned/kubernetes_node.json b/canned/kubernetes_node.json index f1e76c626..ed525d088 100644 --- a/canned/kubernetes_node.json +++ b/canned/kubernetes_node.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_node", - "db": "telegraf", - "rp": "", "groupbys": [ "\"node_name\"" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_node", - "db": "telegraf", - "rp": "", "groupbys": [ "\"node_name\"" ], diff --git a/canned/kubernetes_pod_container.json b/canned/kubernetes_pod_container.json index d51124e22..c0d70e643 100644 --- a/canned/kubernetes_pod_container.json +++ b/canned/kubernetes_pod_container.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_pod_container", - "db": "telegraf", - "rp": "", "groupbys": [ "\"pod_name\"" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_pod_container", - "db": "telegraf", - "rp": "", "groupbys": [ "\"pod_name\"" ], diff --git a/canned/kubernetes_pod_network.json b/canned/kubernetes_pod_network.json index 4c3e3dddc..247b6412e 100644 --- a/canned/kubernetes_pod_network.json +++ b/canned/kubernetes_pod_network.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"tx_bytes\")) AS \"tx_bytes_per_second\" FROM kubernetes_pod_network", - "db": "telegraf", - "rp": "", "groupbys": [ "\"pod_name\"", "\"host\"" @@ -33,8 +31,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"rx_bytes\")) AS \"rx_bytes_per_second\" FROM kubernetes_pod_network", - "db": "telegraf", - "rp": "", "groupbys": [ "\"pod_name\"", "\"host\"" diff --git a/canned/kubernetes_system_container.json b/canned/kubernetes_system_container.json index 2b7ad5d32..da996d33f 100644 --- a/canned/kubernetes_system_container.json +++ b/canned/kubernetes_system_container.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"cpu_usage_nanocores\") / 1000000 AS \"cpu_usage_millicores\" FROM kubernetes_system_container", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [ "\"container_name\" = 'kubelet'" @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT mean(\"memory_usage_bytes\") AS \"memory_usage_bytes\" FROM kubernetes_system_container", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [ "\"container_name\" = 'kubelet'" diff --git a/canned/load.json b/canned/load.json index 85a5917b9..5034628f1 100644 --- a/canned/load.json +++ 
b/canned/load.json @@ -12,9 +12,7 @@ "name": "System Load", "queries": [ { - "query": "SELECT mean(\"load1\") AS \"load\" FROM \"telegraf\"..\"system\"", - "db": "telegraf", - "rp": "", + "query": "SELECT mean(\"load1\") AS \"load\" FROM \"system\"", "groupbys": [], "wheres": [] } diff --git a/canned/mem.json b/canned/mem.json index 12db4943c..1c67767af 100644 --- a/canned/mem.json +++ b/canned/mem.json @@ -12,9 +12,7 @@ "name": "System - Memory Bytes Used", "queries": [ { - "query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"telegraf\"..\"mem\"", - "db": "telegraf", - "rp": "", + "query": "SELECT mean(\"used\") AS \"used\", mean(\"available\") AS \"available\" FROM \"mem\"", "groupbys": [], "wheres": [] } diff --git a/canned/memcached.json b/canned/memcached.json index 71fce2fa0..00e79aa6d 100644 --- a/canned/memcached.json +++ b/canned/memcached.json @@ -1,201 +1,202 @@ - { - "id": "f280c8c7-0530-425c-b281-788d8ded7676", - "measurement": "memcached", - "app": "memcached", - "cells": [{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490", - "name": "Memcached - Current Connections", - "queries": [{ - "query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400", - "name": "Memcached - Get Hits/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"get_hits\"), 1s) AS \"get_hits\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405", - "name": "Memcached - Get Misses/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"get_misses\"), 1s) AS \"get_misses\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413", - "name": "Memcached - Delete Hits/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"delete_hits\"), 1s) AS \"delete_hits\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412", - "name": "Memcached - Delete Misses/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"delete_misses\"), 1s) AS \"delete_misses\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411", - "name": "Memcached - Incr Hits/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"incr_hits\"), 1s) AS \"incr_hits\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510", - "name": "Memcached - Incr Misses/Second", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"incr_misses\"), 1s) AS \"incr_misses\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402", - "name": "Memcached - Current Items", - "queries": [{ - "query": "SELECT max(\"curr_items\") AS \"current_items\" FROM memcached", - 
"db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403", - "name": "Memcached - Total Items", - "queries": [{ - "query": "SELECT max(\"total_items\") AS \"total_items\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404", - "name": "Memcached - Bytes Stored", - "queries": [{ - "query": "SELECT max(\"bytes\") AS \"bytes\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406", - "name": "Memcached - Bytes Read/Sec", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"bytes_read\"), 1s) AS \"bytes_read\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407", - "name": "Memcached - Bytes Written/Sec", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"bytes_written\"), 1s) AS \"bytes_written\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401", - "name": "Memcached - Evictions/10 Seconds", - "queries": [{ - "query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM memcached", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - } - ] - } +{ + "id": "f280c8c7-0530-425c-b281-788d8ded7676", + "measurement": "memcached", + "app": "memcached", + "cells": [ + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af490", + "name": "Memcached - Current Connections", + "queries": [ + { + "query": "SELECT max(\"curr_connections\") AS \"current_connections\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af400", + "name": "Memcached - Get Hits/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"get_hits\"), 1s) AS \"get_hits\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af405", + "name": "Memcached - Get Misses/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"get_misses\"), 1s) AS \"get_misses\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af413", + "name": "Memcached - Delete Hits/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"delete_hits\"), 1s) AS \"delete_hits\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af412", + "name": "Memcached - Delete Misses/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"delete_misses\"), 1s) AS \"delete_misses\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af411", + "name": "Memcached - Incr Hits/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"incr_hits\"), 1s) AS \"incr_hits\" FROM memcached", + "groupbys": [], + "wheres": [] 
+ } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af510", + "name": "Memcached - Incr Misses/Second", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"incr_misses\"), 1s) AS \"incr_misses\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af402", + "name": "Memcached - Current Items", + "queries": [ + { + "query": "SELECT max(\"curr_items\") AS \"current_items\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af403", + "name": "Memcached - Total Items", + "queries": [ + { + "query": "SELECT max(\"total_items\") AS \"total_items\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af404", + "name": "Memcached - Bytes Stored", + "queries": [ + { + "query": "SELECT max(\"bytes\") AS \"bytes\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af406", + "name": "Memcached - Bytes Read/Sec", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"bytes_read\"), 1s) AS \"bytes_read\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af407", + "name": "Memcached - Bytes Written/Sec", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"bytes_written\"), 1s) AS \"bytes_written\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "1b5716f1-b9d1-4a8b-b3bb-6e7d120af401", + "name": "Memcached - Evictions/10 Seconds", + "queries": [ + { + "query": "SELECT non_negative_derivative(max(\"evictions\"), 10s) AS \"evictions\" FROM memcached", + "groupbys": [], + "wheres": [] + } + ] + } + ] +} diff --git a/canned/mongodb.json b/canned/mongodb.json index 46ab7e5c9..d09108b03 100644 --- a/canned/mongodb.json +++ b/canned/mongodb.json @@ -1,117 +1,112 @@ - { - "id": "921298ad-0cdd-44f4-839b-10c319e7fcc7", - "measurement": "mongodb", - "app": "mongodb", - "cells": [{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "b2631fd5-7d32-4a31-9edf-98362fd3626e", - "name": "MongoDB – Read/Second", - "queries": [{ - "query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, +{ + "id": "921298ad-0cdd-44f4-839b-10c319e7fcc7", + "measurement": "mongodb", + "app": "mongodb", + "cells": [ { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "9362e390-951b-4dba-adec-40c261e37604", - "name": "MongoDB – Writes/Second", - "queries": [{ - "query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "b2631fd5-7d32-4a31-9edf-98362fd3626e", + "name": "MongoDB – Read/Second", + "queries": [ + { + "query": "SELECT mean(queries_per_sec) AS queries_per_second, mean(getmores_per_sec) AS getmores_per_second FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, { - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4", - "name": 
"MongoDB – Active Connections", - "queries": [{ - "query": "SELECT mean(open_connections) AS open_connections FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - -{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "ea5ae388-9ca3-42f9-835f-cc9b265705be", - "name": "MongoDB – Reads/Writes Waiting in Queue", - "queries": [{ - "query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, -{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "631dcbba-c997-4fd7-b640-754a1b36026c", - "name": "MongoDB – Network Bytes/Second", - "queries": [{ - "query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }, - -{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2", - "name": "MongoDB – Page Faults", - "queries": [{ - "query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - } - ] - }, - -{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8", - "name": "MongoDB – Memory Usage (MB)", - "queries": [{ - "query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM mongodb", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - } - ] - } - - ] - } + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "9362e390-951b-4dba-adec-40c261e37604", + "name": "MongoDB – Writes/Second", + "queries": [ + { + "query": "SELECT mean(inserts_per_sec) AS inserts_per_second, mean(updates_per_sec) AS updates_per_second, mean(deletes_per_sec) AS deletes_per_second FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "7ca54d4c-9f0d-47fd-a7fe-2d01e832bbf4", + "name": "MongoDB – Active Connections", + "queries": [ + { + "query": "SELECT mean(open_connections) AS open_connections FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "ea5ae388-9ca3-42f9-835f-cc9b265705be", + "name": "MongoDB – Reads/Writes Waiting in Queue", + "queries": [ + { + "query": "SELECT max(queued_reads) AS queued_reads, max(queued_writes) as queued_writes FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "631dcbba-c997-4fd7-b640-754a1b36026c", + "name": "MongoDB – Network Bytes/Second", + "queries": [ + { + "query": "SELECT mean(net_in_bytes) AS net_in_bytes, mean(net_out_bytes) as net_out_bytes FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "5b03bef0-e5e9-4b53-b5f8-1d1b740cf5a2", + "name": "MongoDB – Page Faults", + "queries": [ + { + "query": "SELECT mean(page_faults_per_sec) AS page_faults_per_second FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + }, + { + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "4bc98883-2347-46bb-9459-1c6fe7fb47a8", + "name": "MongoDB – Memory Usage (MB)", + "queries": [ + { + "query": "SELECT mean(vsize_megabytes) AS virtual_memory_megabytes, mean(resident_megabytes) as resident_memory_megabytes FROM mongodb", + "groupbys": [], + "wheres": [] + } + ] + } + ] +} diff --git a/canned/mysql.json b/canned/mysql.json index 82d5edaf1..983fd905a 100644 --- a/canned/mysql.json +++ 
b/canned/mysql.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"commands_select\")) AS selects_per_second FROM mysql", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"commands_insert\")) AS inserts_per_second, non_negative_derivative(max(\"commands_update\")) AS updates_per_second, non_negative_derivative(max(\"commands_delete\")) AS deletes_per_second FROM mysql", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -51,8 +47,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"threads_connected\")) AS cxn_per_second, non_negative_derivative(max(\"threads_running\")) AS threads_running_per_second FROM mysql", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -70,8 +64,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"connection_errors_max_connections\")) AS cxn_errors_per_second, non_negative_derivative(max(\"connection_errors_internal\")) AS internal_cxn_errors_per_second, non_negative_derivative(max(\"aborted_connects\")) AS cxn_aborted_per_second FROM mysql", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], diff --git a/canned/netstat.json b/canned/netstat.json index e77b4367c..ab16794fa 100644 --- a/canned/netstat.json +++ b/canned/netstat.json @@ -1,7 +1,7 @@ { "id": "ff41d044-f61a-4522-8de7-9e39e3a1b5de", "measurement": "netstat", - "app": "network", + "app": "system", "cells": [ { "x": 0, @@ -13,15 +13,11 @@ "queries": [ { "query": "SELECT mean(\"tcp_established\") AS \"tcp_established\" FROM netstat", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] }, { "query": "SELECT mean(\"udp_socket\") AS \"udp_socket\" FROM netstat", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -37,15 +33,11 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"tcp_established\")) AS \"tcp_established\" FROM netstat", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] }, { "query": "SELECT non_negative_derivative(max(\"udp_socket\")) AS \"udp_socket\" FROM netstat", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/new_apps.sh b/canned/new_apps.sh index 4974ae64f..6d9b32ec5 100755 --- a/canned/new_apps.sh +++ b/canned/new_apps.sh @@ -37,24 +37,22 @@ UUID=$(uuidgen | tr A-Z a-z) APP_FILE="$measurement".json echo Creating measurement file $APP_FILE cat > $APP_FILE << EOF - { - "id": "$UUID", - "measurement": "$measurement", - "app": "$measurement", - "cells": [{ - "x": 0, - "y": 0, - "w": 4, - "h": 4, - "i": "$CELLID", - "name": "User facing cell Name", - "queries": [{ - "query": "select mean(\"used_percent from\") from disk", - "db": "telegraf", - "rp": "", - "groupbys": [], - "wheres": [] - }] - }] - } +{ + "id": "$UUID", + "measurement": "$measurement", + "app": "$measurement", + "cells": [{ + "x": 0, + "y": 0, + "w": 4, + "h": 4, + "i": "$CELLID", + "name": "User facing cell Name", + "queries": [{ + "query": "select mean(\"used_percent from\") from disk", + "groupbys": [], + "wheres": [] + }] + }] +} EOF diff --git a/canned/nginx.json b/canned/nginx.json index 9da2b345d..e53d2c02f 100644 --- a/canned/nginx.json +++ b/canned/nginx.json @@ -13,10 +13,8 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"accepts\"), 1s) AS \"accepts\", non_negative_derivative(max(\"handled\"), 1s) AS \"handled\", non_negative_derivative(max(\"active\"), 1s) AS \"active\" FROM nginx", - "db": "telegraf", - "rp": "", "groupbys": [ 
- "\"server\"" + "\"server\"" ], "wheres": [] } @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"accepts\")) - non_negative_derivative(max(\"handled\")) FROM nginx", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -51,8 +47,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"requests\"), 1s) AS \"requests\" FROM nginx", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -70,8 +64,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"waiting\"), 1s) AS \"waiting\", non_negative_derivative(max(\"reading\"), 1s) AS \"reading\", non_negative_derivative(max(\"writing\"), 1s) AS \"writing\" FROM nginx", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], diff --git a/canned/nsq_channel.json b/canned/nsq_channel.json index 8c9f848d8..e164dd816 100644 --- a/canned/nsq_channel.json +++ b/canned/nsq_channel.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"client_count\") AS \"client_count\" FROM nsq_channel", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"", "\"channel\"" @@ -33,8 +31,6 @@ "queries": [ { "query": "SELECT mean(\"message_count\") AS \"message_count\" FROM nsq_channel", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"", "\"channel\"" diff --git a/canned/nsq_server.json b/canned/nsq_server.json index 2b64a114d..5ad9fd63b 100644 --- a/canned/nsq_server.json +++ b/canned/nsq_server.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"topic_count\") AS \"topic_count\" FROM nsq_server", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -30,8 +28,6 @@ "queries": [ { "query": "SELECT mean(\"server_count\") AS \"server_count\" FROM nsq_server", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/nsq_topic.json b/canned/nsq_topic.json index 6a6b1446f..3e745af85 100644 --- a/canned/nsq_topic.json +++ b/canned/nsq_topic.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"depth\") AS \"depth\" FROM nsq_topic", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT mean(\"backend_depth\") AS \"backend_depth\" FROM nsq_topic", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"" ], @@ -51,8 +47,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"message_count\")) AS \"messages_per_second\" FROM nsq_topic", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"", "\"host\"" @@ -71,8 +65,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"message_count\")) - non_negative_derivative(max(\"depth\")) AS \"messages_per_second\" FROM nsq_topic", - "db": "telegraf", - "rp": "", "groupbys": [ "\"topic\"", "\"host\"" diff --git a/canned/ping.json b/canned/ping.json index f6b4c91d3..ee6587cfb 100644 --- a/canned/ping.json +++ b/canned/ping.json @@ -13,8 +13,6 @@ "queries": [ { "query": "select max(\"percent_packet_loss\") as \"packet_loss\" from ping", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "select mean(\"average_response_ms\") as \"average\", mean(\"minimum_response_ms\") as \"min\", mean(\"maximum_response_ms\") as \"max\" from ping", - "db": "telegraf", - "rp": "", "groupbys": [ "\"server\"" ], diff --git a/canned/postgresql.json b/canned/postgresql.json index 902bd5784..bf9b4da71 100644 --- a/canned/postgresql.json +++ b/canned/postgresql.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(mean(\"tup_fetched\")) AS \"fetched\", 
non_negative_derivative(mean(\"tup_returned\")) AS \"returned\", non_negative_derivative(mean(\"tup_inserted\")) AS \"inserted\", non_negative_derivative(mean(\"tup_updated\")) AS \"updated\" FROM postgresql", - "db": "telegraf", - "rp": "", "groupbys": [ "db" ], @@ -32,8 +30,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(mean(\"xact_commit\")) AS \"xact_commit\" FROM postgresql", - "db": "telegraf", - "rp": "", "groupbys": [ "db" ], @@ -51,10 +47,7 @@ "queries": [ { "query": "SELECT mean(\"buffers_alloc\") AS \"buffers_allocated\", mean(\"buffers_backend\") AS \"buffers_backend\", mean(\"buffers_backend_fsync\") AS \"buffers_backend_fsync\", mean(\"buffers_checkpoint\") AS \"buffers_checkpoint\", mean(\"buffers_clean\") AS \"buffers_clean\" FROM postgresql", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] @@ -69,10 +62,7 @@ "queries": [ { "query": "SELECT mean(\"conflicts\") AS \"conflicts\", mean(\"deadlocks\") AS \"deadlocks\" FROM postgresql", - "db": "telegraf", - "rp": "", - "groupbys": [ - ], + "groupbys": [], "wheres": [] } ] diff --git a/canned/processes.json b/canned/processes.json index e430b43a5..e4b682c61 100644 --- a/canned/processes.json +++ b/canned/processes.json @@ -1,7 +1,7 @@ { "id": "ffad2dff-d263-412e-806a-1e836af87942", "measurement": "processes", - "app": "processes", + "app": "system", "cells": [ { "x": 0, @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"total\") AS \"total\" FROM processes", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/redis.json b/canned/redis.json index e58cc3bbd..15f670517 100644 --- a/canned/redis.json +++ b/canned/redis.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"clients\") AS \"clients\" FROM redis", - "db": "telegraf", - "rp": "", "groupbys": [] } ] @@ -29,8 +27,6 @@ "queries": [ { "query": "SELECT mean(\"blocked_clients\") AS \"blocked_clients\" FROM redis", - "db": "telegraf", - "rp": "", "groupbys": [] } ] @@ -45,8 +41,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"used_cpu_user\")) AS \"used_cpu_per_second\" FROM redis", - "db": "telegraf", - "rp": "", "groupbys": [] } ] @@ -61,8 +55,6 @@ "queries": [ { "query": "SELECT non_negative_derivative(max(\"used_memory\")) AS \"used_memory_per_second\" FROM redis", - "db": "telegraf", - "rp": "", "groupbys": [] } ] diff --git a/canned/riak.json b/canned/riak.json index 221ced790..177144637 100644 --- a/canned/riak.json +++ b/canned/riak.json @@ -13,9 +13,9 @@ "queries": [ { "query": "SELECT max(\"memory_total\") as memory_total_bytes FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -30,9 +30,9 @@ "queries": [ { "query": "SELECT max(\"node_get_fsm_objsize_median\") AS \"median\", max(\"node_get_fsm_objsize_100\") AS \"100th-percentile\", max(\"node_get_fsm_objsize_99\") AS \"99th-percentile\", max(\"node_get_fsm_objsize_mean\") AS \"mean\", max(\"node_get_fsm_objsize_95\") AS \"95th-percentile\" FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -47,9 +47,9 @@ "queries": [ { "query": "SELECT max(\"node_get_fsm_siblings_median\") AS \"median\", max(\"node_get_fsm_siblings_mean\") AS \"mean\", max(\"node_get_fsm_siblings_99\") AS \"99th-percentile\", max(\"node_get_fsm_siblings_95\") AS \"95th-percentile\", max(\"node_get_fsm_siblings_100\") AS \"100th-percentile\" FROM riak", - "db": "telegraf", -
"rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -64,9 +64,9 @@ "queries": [ { "query": "SELECT max(\"node_put_fsm_time_median\") / 1000 AS \"median_put_milliseconds\", max(\"node_get_fsm_time_median\") / 1000 AS \"median_get_milliseconds\" FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -81,9 +81,9 @@ "queries": [ { "query": "SELECT max(\"node_puts\") AS \"puts_per_minute\", max(\"node_gets\") AS \"gets_per_minute\" FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -98,9 +98,9 @@ "queries": [ { "query": "SELECT max(\"pbc_active\") AS \"active_protobuf_connections\" FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] @@ -115,9 +115,9 @@ "queries": [ { "query": "SELECT max(\"read_repairs\") AS \"read_repairs_per_minute\" FROM riak", - "db": "telegraf", - "rp": "", - "groupbys": ["\"nodename\""], + "groupbys": [ + "\"nodename\"" + ], "wheres": [] } ] diff --git a/canned/varnish.json b/canned/varnish.json index 50f229b74..a81c81fd5 100644 --- a/canned/varnish.json +++ b/canned/varnish.json @@ -13,8 +13,6 @@ "queries": [ { "query": "select non_negative_derivative(mean(cache_hit)) as hits, non_negative_derivative(mean(cache_miss)) as misses from varnish", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/win_cpu.json b/canned/win_cpu.json index 2dffddcfd..ca7067a45 100644 --- a/canned/win_cpu.json +++ b/canned/win_cpu.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"Percent_Processor_Time\") AS \"percent_processor_time\" FROM win_cpu", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/win_mem.json b/canned/win_mem.json index e4c8017b7..11906e20d 100644 --- a/canned/win_mem.json +++ b/canned/win_mem.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"Available_Bytes\") AS \"available_bytes\" FROM win_mem", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/win_net.json b/canned/win_net.json index 1367df7c2..bce8f2de6 100644 --- a/canned/win_net.json +++ b/canned/win_net.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"Bytes_Sent_persec\") AS \"bytes_sent\" FROM win_net", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } @@ -30,8 +28,6 @@ "queries": [ { "query": "SELECT mean(\"Bytes_Received_persec\") AS \"bytes_received\" FROM win_net", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/win_system.json b/canned/win_system.json index 226f4d2a2..c0a65e32e 100644 --- a/canned/win_system.json +++ b/canned/win_system.json @@ -13,8 +13,6 @@ "queries": [ { "query": "SELECT mean(\"Processor_Queue_Length\") AS \"load\" FROM win_system", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/canned/win_websvc.json b/canned/win_websvc.json index 4f82d9d70..689bc589c 100644 --- a/canned/win_websvc.json +++ b/canned/win_websvc.json @@ -13,22 +13,16 @@ "queries": [ { "query": "SELECT mean(\"Get_Requests_persec\") AS \"gets\" FROM win_websvc", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] }, { "query": "SELECT mean(\"Post_Requests_persec\") AS \"posts\" FROM win_websvc", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] }, { "query": "SELECT mean(\"Current_Connections\") AS \"connections\" FROM 
win_websvc", - "db": "telegraf", - "rp": "", "groupbys": [], "wheres": [] } diff --git a/chronograf.go b/chronograf.go index fc71f2d90..db971bdee 100644 --- a/chronograf.go +++ b/chronograf.go @@ -13,6 +13,7 @@ const ( ErrSourceNotFound = Error("source not found") ErrServerNotFound = Error("server not found") ErrLayoutNotFound = Error("layout not found") + ErrUserNotFound = Error("user not found") ErrLayoutInvalid = Error("layout is invalid") ErrAlertNotFound = Error("alert not found") ErrAuthentication = Error("user not authenticated") @@ -54,11 +55,11 @@ type TimeSeries interface { // Query retrieves a Response from a TimeSeries. type Query struct { - Command string `json:"query"` // Command is the query itself - DB string `json:"db"` // DB is optional and if empty will not be used. - RP string `json:"rp"` // RP is a retention policy and optional; if empty will not be used. - Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes - GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags + Command string `json:"query"` // Command is the query itself + DB string `json:"db,omitempty"` // DB is optional and if empty will not be used. + RP string `json:"rp,omitempty"` // RP is a retention policy and optional; if empty will not be used. + Wheres []string `json:"wheres"` // Wheres restricts the query to certain attributes + GroupBys []string `json:"groupbys"` // GroupBys collate the query by these tags } // Response is the result of a query against a TimeSeries @@ -75,6 +76,7 @@ type Source struct { Password string `json:"password,omitempty"` // Password is in CLEARTEXT URL string `json:"url"` // URL are the connections to the source Default bool `json:"default"` // Default specifies the default source for the application + Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf" } // SourcesStore stores connection information for a `TimeSeries` @@ -195,23 +197,22 @@ type UserID int // User represents an authenticated user. type User struct { - ID UserID - Name string + ID UserID `json:"id"` + Email string `json:"email"` } -// AuthStore is the Storage and retrieval of authentication information -type AuthStore struct { - // User management for the AuthStore - Users interface { - // Create a new User in the AuthStore - Add(context.Context, User) error - // Delete the User from the AuthStore - Delete(context.Context, User) error - // Retrieve a user if `ID` exists. - Get(ctx context.Context, ID int) error - // Update the user's permissions or roles - Update(context.Context, User) error - } +// UsersStore is the Storage and retrieval of authentication information +type UsersStore interface { + // Create a new User in the UsersStore + Add(context.Context, *User) (*User, error) + // Delete the User from the UsersStore + Delete(context.Context, *User) error + // Get retrieves a user if `ID` exists. + Get(ctx context.Context, ID UserID) (*User, error) + // Update the user's permissions or roles + Update(context.Context, *User) error + // FindByEmail will retrieve a user by email address. + FindByEmail(ctx context.Context, Email string) (*User, error) } // ExplorationID is a unique ID for an Exploration. 
diff --git a/circle.yml b/circle.yml index a841e21a5..0d65c9815 100644 --- a/circle.yml +++ b/circle.yml @@ -1,32 +1,64 @@ --- machine: - services: - - docker - post: - - go version - - go version | grep 1.7.3 || (sudo rm -rf /usr/local/go && wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz && sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz) - - go version + services: + - docker + environment: + DOCKER_TAG: chronograf-20161121 dependencies: - pre: - - npm install -g node-sass - - git config --global url."git@github.com:".insteadOf "https://github.com/" - - mkdir -p ${HOME}/.go_workspace/src/github.com/influxdata - - ln -sf ${HOME}/chronograf ${HOME}/.go_workspace/src/github.com/influxdata - - "make clean": - pwd: ../.go_workspace/src/github.com/influxdata/chronograf - - "make": - pwd: ../.go_workspace/src/github.com/influxdata/chronograf + override: + - ./etc/scripts/docker/pull.sh test: - override: - - make test + override: + - > + ./etc/scripts/docker/run.sh + --test + --no-build deployment: - quayio: - branch: master - commands: - - make docker - - docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io - - docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} - - docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} + master: + branch: master + commands: + - > + ./etc/scripts/docker/run.sh + --clean + --package + --platform all + --arch all + --upload + - sudo chown -R ubuntu:ubuntu /home/ubuntu + - cp build/linux/static_amd64/chronograf . + - docker build -t chronograf . + - docker login -e $QUAY_EMAIL -u "$QUAY_USER" -p $QUAY_PASS quay.io + - docker tag chronograf quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} + - docker push quay.io/influxdb/chronograf:${CIRCLE_SHA1:0:7} + - mv ./build/* $CIRCLE_ARTIFACTS + pre-release: + tag: /^[0-9]+(\.[0-9]+)*(\S*)([a|rc|beta]([0-9]+))+$/ + commands: + - > + ./etc/scripts/docker/run.sh + --clean + --release + --package + --platform all + --arch all + --upload + --bucket dl.influxdata.com/chronograf/releases + - sudo chown -R ubuntu:ubuntu /home/ubuntu + - mv ./build/* $CIRCLE_ARTIFACTS + release: + tag: /^[0-9]+(\.[0-9]+)*$/ + commands: + - > + ./etc/scripts/docker/run.sh + --clean + --release + --package + --platform all + --arch all + --upload + --bucket dl.influxdata.com/chronograf/releases + - sudo chown -R ubuntu:ubuntu /home/ubuntu + - mv ./build/* $CIRCLE_ARTIFACTS diff --git a/docs/INSTALLATION.md b/docs/INSTALLATION.md index e01add6e0..4fab0e742 100644 --- a/docs/INSTALLATION.md +++ b/docs/INSTALLATION.md @@ -6,7 +6,7 @@ It makes owning the monitoring and alerting for your infrastructure easy to setu The next sections will get you up and running with Chronograf with as little configuration and code as possible. By the end of this document you will have downloaded, installed, and configured all four packages of the -TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor you infrastructure. +TICK stack ([Telegraf](https://github.com/influxdata/telegraf), [InfluxDB](https://github.com/influxdata/influxdb), Chronograf, and [Kapacitor](https://github.com/influxdata/kapacitor)), and you will be all set to monitor your infrastructure. ## Operating System Support Chronograf and the other components of the TICK stack are supported on a large number of operating systems and hardware architectures. 
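The CircleCI rework above delegates the build entirely to the new `etc/scripts/docker` helpers, so the same flow can be reproduced on a workstation. A rough sketch, assuming Docker is installed and the `quay.io/influxdb/builder` image is pullable from your environment (both scripts appear later in this patch):

```sh
# Mirror the CI "test" section: fetch the pinned builder image, then run
# the test suite inside it without building any packages.
export DOCKER_TAG=chronograf-20161121   # the tag pinned in circle.yml
./etc/scripts/docker/pull.sh
./etc/scripts/docker/run.sh --test --no-build
```

Note that `run.sh` mounts your SSH key and forwards AWS credentials into the container, so this is only appropriate on a trusted machine.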
diff --git a/docs/images/welcome.png b/docs/images/welcome.png index 9f3841ae6..922dd20ca 100644 Binary files a/docs/images/welcome.png and b/docs/images/welcome.png differ diff --git a/docs/proto.md b/docs/proto.md new file mode 100644 index 000000000..6507e39ce --- /dev/null +++ b/docs/proto.md @@ -0,0 +1,9 @@ +Download a protobuf release binary from https://github.com/google/protobuf/releases/tag/v3.1.0 + +Then run the following four commands, as listed at https://github.com/gogo/protobuf: +```sh +go get github.com/gogo/protobuf/proto +go get github.com/gogo/protobuf/jsonpb +go get github.com/gogo/protobuf/protoc-gen-gogo +go get github.com/gogo/protobuf/gogoproto +``` diff --git a/etc/build.py b/etc/build.py index 0f3d1fc29..5474e61cd 100755 --- a/etc/build.py +++ b/etc/build.py @@ -11,6 +11,7 @@ import hashlib import re import logging import argparse +import json ################ #### Chronograf Variables @@ -147,7 +148,6 @@ def run_generate(): """Generate static assets. """ logging.info("Generating static assets...") - run("make dep", shell=True) run("make assets", shell=True) return True @@ -157,75 +157,46 @@ def go_get(branch, update=False, no_uncommitted=False): if local_changes() and no_uncommitted: logging.error("There are uncommitted changes in the current directory.") return False - if not check_path_for("gdm"): - logging.info("Downloading `gdm`...") - get_command = "go get github.com/sparrc/gdm" - run(get_command) - logging.info("Retrieving dependencies with `gdm`...") - sys.stdout.flush() - run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) + run("make dep", shell=True) return True def run_tests(race, parallel, timeout, no_vet): """Run the Go test suite on binary output. """ - logging.info("Starting tests...") - if race: - logging.info("Race is enabled.") - if parallel is not None: - logging.info("Using parallel: {}".format(parallel)) - if timeout is not None: - logging.info("Using timeout: {}".format(timeout)) - out = run("go fmt ./...") - if len(out) > 0: - logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") - logging.error("{}".format(out)) - return False - if not no_vet: - logging.info("Running 'go vet'...") - out = run(go_vet_command) - if len(out) > 0: - logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") - logging.error("{}".format(out)) - return False - else: - logging.info("Skipping 'go vet' call...") - test_command = "go test -v" - if race: - test_command += " -race" - if parallel is not None: - test_command += " -parallel {}".format(parallel) - if timeout is not None: - test_command += " -timeout {}".format(timeout) - test_command += " ./..." logging.info("Running tests...") - output = run(test_command) - logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore'))) + run("make test", shell=True, print_output=True) return True ################ #### All Chronograf-specific content above this line ################ -def run(command, allow_failure=False, shell=False): +def run(command, allow_failure=False, shell=False, print_output=False): """Run shell command (convenience wrapper around subprocess).
""" out = None logging.debug("{}".format(command)) try: - if shell: - out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) - else: - out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) - out = out.decode('utf-8').strip() - # logging.debug("Command output: {}".format(out)) - except subprocess.CalledProcessError as e: - if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e.output)) - return None - else: - logging.error("Command '{}' failed with error: {}".format(command, e.output)) - sys.exit(1) + cmd = command + if not shell: + cmd = command.split() + + stdout = subprocess.PIPE + stderr = subprocess.STDOUT + if print_output: + stdout = None + + p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr) + out, _ = p.communicate() + if out is not None: + out = out.decode('utf-8').strip() + if p.returncode != 0: + if allow_failure: + logging.warn(u"Command '{}' failed with error: {}".format(command, out)) + return None + else: + logging.error(u"Command '{}' failed with error: {}".format(command, out)) + sys.exit(1) except OSError as e: if allow_failure: logging.warn("Command '{}' failed with error: {}".format(command, e)) @@ -767,6 +738,9 @@ def main(args): if not run_tests(args.race, args.parallel, args.timeout, args.no_vet): return 1 + if args.no_build: + return 0 + platforms = [] single_build = True if args.platform == 'all': @@ -828,10 +802,54 @@ def main(args): args.upload_overwrite = True if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): return 1 - logging.info("Packages created:") + package_output = {} + package_output["version"] = args.version for p in packages: - logging.info("{} (MD5={})".format(p.split('/')[-1:][0], - generate_md5_from_file(p))) + p_name = p.split('/')[-1:][0] + if ".asc" in p_name: + # Skip public keys + continue + + arch = None + type = None + regex = None + if ".deb" in p_name: + type = "ubuntu" + regex = r"^.+_(.+)\.deb$" + elif ".rpm" in p_name: + type = "centos" + regex = r"^.+\.(.+)\.rpm$" + elif ".tar.gz" in p_name: + if "linux" in p_name: + if "static" in p_name: + type = "linux_static" + else: + type = "linux" + elif "darwin" in p_name: + type = "darwin" + regex = r"^.+_(.+)\.tar.gz$" + elif ".zip" in p_name: + if "windows" in p_name: + type = "windows" + regex = r"^.+_(.+)\.zip$" + + if regex is None or type is None: + logging.error("Could not determine package type for: {}".format(p)) + return 1 + match = re.search(regex, p_name) + arch = match.groups()[0] + if arch is None: + logging.error("Could not determine arch for: {}".format(p)) + return 1 + if arch == "x86_64": + arch = "amd64" + elif arch == "x86_32": + arch = "i386" + package_output[str(arch) + "_" + str(type)] = { + "md5": generate_md5_from_file(p), + "filename": p_name, + } + logging.info(json.dumps(package_output, sort_keys=True, indent=4)) if orig_branch != get_current_branch(): logging.info("Moving back to original git branch: {}".format(orig_branch)) run("git checkout {}".format(orig_branch)) @@ -964,6 +982,9 @@ if __name__ == '__main__': metavar='', type=str, help='Timeout for tests before failing') + parser.add_argument('--no-build', + action='store_true', + help='Dont build anything.') args = parser.parse_args() print_banner() sys.exit(main(args)) diff --git a/etc/scripts/docker/build.sh b/etc/scripts/docker/build.sh new file mode 100755 index 000000000..c40a1a236 --- /dev/null +++ b/etc/scripts/docker/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -x 
+docker_tag="chronograf-$(date +%Y%m%d)" + +docker build --rm=false -f etc/Dockerfile_build -t builder:$docker_tag . +docker tag builder:$docker_tag quay.io/influxdb/builder:$docker_tag + +docker push quay.io/influxdb/builder:$docker_tag diff --git a/etc/scripts/docker/pull.sh b/etc/scripts/docker/pull.sh new file mode 100755 index 000000000..dfe72f531 --- /dev/null +++ b/etc/scripts/docker/pull.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# +# Pull the required build image from quay.io. +# + +if [[ -z "$DOCKER_TAG" ]]; then + echo "Please specify a tag to pull from with the DOCKER_TAG env variable." + exit 1 +fi + +docker pull quay.io/influxdb/builder:$DOCKER_TAG diff --git a/etc/scripts/docker/run.sh b/etc/scripts/docker/run.sh new file mode 100755 index 000000000..adde95ffe --- /dev/null +++ b/etc/scripts/docker/run.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Pass all CLI arguments to the Chronograf builder Docker image (passing +# them to the build scripts) +# +# WARNING: This script passes your SSH and AWS credentials into the +# Docker container, so use with caution. +# + +set -e + +# Default SSH key to $HOME/.ssh/id_rsa if not set
test -z "$SSH_KEY_PATH" && SSH_KEY_PATH="$HOME/.ssh/id_rsa" +echo "Using SSH key located at: $SSH_KEY_PATH" + +# Default docker tag if not specified +test -z "$DOCKER_TAG" && DOCKER_TAG="chronograf-20161121" + +docker run \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -v $SSH_KEY_PATH:/root/.ssh/id_rsa \ + -v ~/.ssh/known_hosts:/root/.ssh/known_hosts \ + -v $(pwd):/root/go/src/github.com/influxdata/chronograf \ + quay.io/influxdb/builder:$DOCKER_TAG \ + "$@" diff --git a/server/exploration.go b/server/exploration.go index 23252106c..b04ce618f 100644 --- a/server/exploration.go +++ b/server/exploration.go @@ -45,14 +45,14 @@ type explorations struct { func (h *Service) Explorations(w http.ResponseWriter, r *http.Request) { id, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() mrExs, err := h.ExplorationStore.Query(ctx, chronograf.UserID(id)) if err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, h.Logger) return } @@ -71,20 +71,20 @@ func (h *Service) ExplorationsID(w http.ResponseWriter, r *http.Request) { eID, err := paramID("eid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } uID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID)) if err != nil || e.UserID != chronograf.UserID(uID) { - notFound(w, eID) + notFound(w, eID, h.Logger) return } @@ -101,26 +101,26 @@ type patchExplorationRequest struct { func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) { id, err := paramID("eid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } uID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(id)) if err != nil ||
e.UserID != chronograf.UserID(uID) { - notFound(w, id) + notFound(w, id, h.Logger) return } var req patchExplorationRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } @@ -128,7 +128,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) { var ok bool if e.Data, ok = req.Data.(string); !ok { err := fmt.Errorf("Error: Exploration data is not a string") - invalidData(w, err) + invalidData(w, err, h.Logger) return } } @@ -139,7 +139,7 @@ func (h *Service) UpdateExploration(w http.ResponseWriter, r *http.Request) { if err := h.ExplorationStore.Update(ctx, e); err != nil { msg := "Error: Failed to update Exploration" - Error(w, http.StatusInternalServerError, msg) + Error(w, http.StatusInternalServerError, msg, h.Logger) return } @@ -156,14 +156,14 @@ type postExplorationRequest struct { func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) { uID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } // TODO: Check user if user exists. var req postExplorationRequest if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } @@ -182,7 +182,7 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) { e, err = h.ExplorationStore.Add(ctx, e) if err != nil { msg := fmt.Errorf("Error: Failed to save Exploration") - unknownErrorWithMessage(w, msg) + unknownErrorWithMessage(w, msg, h.Logger) return } @@ -195,25 +195,25 @@ func (h *Service) NewExploration(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveExploration(w http.ResponseWriter, r *http.Request) { eID, err := paramID("eid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } uID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() e, err := h.ExplorationStore.Get(ctx, chronograf.ExplorationID(eID)) if err != nil || e.UserID != chronograf.UserID(uID) { - notFound(w, eID) + notFound(w, eID, h.Logger) return } if err := h.ExplorationStore.Delete(ctx, &chronograf.Exploration{ID: chronograf.ExplorationID(eID)}); err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, h.Logger) return } w.WriteHeader(http.StatusNoContent) diff --git a/server/kapacitors.go b/server/kapacitors.go index a62e627d4..cb97152eb 100644 --- a/server/kapacitors.go +++ b/server/kapacitors.go @@ -55,24 +55,24 @@ type kapacitor struct { func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) { srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() _, err = h.SourcesStore.Get(ctx, srcID) if err != nil { - notFound(w, srcID) + notFound(w, srcID, h.Logger) return } var req postKapacitorRequest if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } if err := req.Valid(); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } @@ -86,7 +86,7 @@ func (h *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) { if srv, err = h.ServersStore.Add(ctx, srv); err != nil { msg := 
fmt.Errorf("Error storing kapacitor %v: %v", req, err) - unknownErrorWithMessage(w, msg) + unknownErrorWithMessage(w, msg, h.Logger) return } @@ -120,7 +120,7 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) { ctx := r.Context() mrSrvs, err := h.ServersStore.All(ctx) if err != nil { - Error(w, http.StatusInternalServerError, "Error loading kapacitors") + Error(w, http.StatusInternalServerError, "Error loading kapacitors", h.Logger) return } @@ -140,20 +140,20 @@ func (h *Service) Kapacitors(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } @@ -165,25 +165,25 @@ func (h *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } if err = h.ServersStore.Delete(ctx, srv); err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, h.Logger) return } w.WriteHeader(http.StatusNoContent) @@ -213,31 +213,31 @@ func (p *patchKapacitorRequest) Valid() error { func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } var req patchKapacitorRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } if err := req.Valid(); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } @@ -256,7 +256,7 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) { if err := h.ServersStore.Update(ctx, srv); err != nil { msg := fmt.Sprintf("Error updating kapacitor ID %d", id) - Error(w, http.StatusInternalServerError, msg) + Error(w, http.StatusInternalServerError, msg, h.Logger) return } @@ -268,20 +268,20 @@ func (h *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), 
h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } @@ -295,7 +295,7 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { var req chronograf.AlertRule if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } // TODO: validate this data @@ -308,13 +308,13 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { task, err := c.Create(ctx, req) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } req.ID = task.ID rule, err := h.AlertRulesStore.Add(ctx, srcID, id, req) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -348,20 +348,20 @@ type alertResponse struct { func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } @@ -374,7 +374,7 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { } var req chronograf.AlertRule if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } // TODO: validate this data @@ -388,22 +388,22 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { // Check if the rule exists and is scoped correctly if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil { if err == chronograf.ErrAlertNotFound { - notFound(w, id) + notFound(w, id, h.Logger) return } - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } req.ID = tid task, err := c.Update(ctx, c.Href(tid), req) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } if err := h.AlertRulesStore.Update(ctx, srcID, id, req); err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -423,26 +423,26 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) 
return } rules, err := h.AlertRulesStore.All(ctx, srcID, id) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -454,7 +454,7 @@ func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) { for _, rule := range rules { tickscript, err := ticker.Generate(rule) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -476,24 +476,24 @@ type allAlertsResponse struct { Rules []alertResponse `json:"rules"` } -// KapacitorRulesGet retrieves specific task +// KapacitorRulesID retrieves specific task func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } tid := httprouter.GetParamFromContext(ctx, "tid") @@ -501,10 +501,10 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { rule, err := h.AlertRulesStore.Get(ctx, srcID, id, tid) if err != nil { if err == chronograf.ErrAlertNotFound { - notFound(w, id) + notFound(w, id, h.Logger) return } - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -512,7 +512,7 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { c := kapa.Client{} tickscript, err := ticker.Generate(rule) if err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -528,24 +528,24 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { encodeJSON(w, http.StatusOK, res, h.Logger) } -// KapacitosRulesDelete proxies DELETE to kapacitor +// KapacitorRulesDelete proxies DELETE to kapacitor func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) { id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } @@ -554,10 +554,10 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) { // Check if the rule is linked to this server and kapacitor if _, err := h.AlertRulesStore.Get(ctx, srcID, id, tid); err != nil { if err == chronograf.ErrAlertNotFound { - notFound(w, id) + notFound(w, id, h.Logger) return } - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } @@ -567,12 +567,12 @@ func (h *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) { Password: srv.Password, } if err := c.Delete(ctx, c.Href(tid)); err != nil { - Error(w, 
http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } if err := h.AlertRulesStore.Delete(ctx, srcID, id, chronograf.AlertRule{ID: tid}); err != nil { - Error(w, http.StatusInternalServerError, err.Error()) + Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } diff --git a/server/layout.go b/server/layout.go index c9b38f1aa..e600c288e 100644 --- a/server/layout.go +++ b/server/layout.go @@ -32,19 +32,19 @@ func newLayoutResponse(layout chronograf.Layout) layoutResponse { func (h *Service) NewLayout(w http.ResponseWriter, r *http.Request) { var layout chronograf.Layout if err := json.NewDecoder(r.Body).Decode(&layout); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } if err := ValidLayoutRequest(layout); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } var err error if layout, err = h.LayoutStore.Add(r.Context(), layout); err != nil { msg := fmt.Errorf("Error storing layout %v: %v", layout, err) - unknownErrorWithMessage(w, msg) + unknownErrorWithMessage(w, msg, h.Logger) return } @@ -72,7 +72,7 @@ func (h *Service) Layouts(w http.ResponseWriter, r *http.Request) { ctx := r.Context() layouts, err := h.LayoutStore.All(ctx) if err != nil { - Error(w, http.StatusInternalServerError, "Error loading layouts") + Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger) return } @@ -104,7 +104,7 @@ func (h *Service) LayoutsID(w http.ResponseWriter, r *http.Request) { layout, err := h.LayoutStore.Get(ctx, id) if err != nil { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id)) + Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger) return } @@ -122,7 +122,7 @@ func (h *Service) RemoveLayout(w http.ResponseWriter, r *http.Request) { } if err := h.LayoutStore.Delete(ctx, layout); err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, h.Logger) return } @@ -136,25 +136,25 @@ func (h *Service) UpdateLayout(w http.ResponseWriter, r *http.Request) { _, err := h.LayoutStore.Get(ctx, id) if err != nil { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id)) + Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), h.Logger) return } var req chronograf.Layout if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } req.ID = id if err := ValidLayoutRequest(req); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } if err := h.LayoutStore.Update(ctx, req); err != nil { msg := fmt.Sprintf("Error updating layout ID %s: %v", id, err) - Error(w, http.StatusInternalServerError, msg) + Error(w, http.StatusInternalServerError, msg, h.Logger) return } diff --git a/server/mappings.go b/server/mappings.go index 2917cc651..e26b63013 100644 --- a/server/mappings.go +++ b/server/mappings.go @@ -16,7 +16,7 @@ func (h *Service) GetMappings(w http.ResponseWriter, r *http.Request) { ctx := r.Context() layouts, err := h.LayoutStore.All(ctx) if err != nil { - Error(w, http.StatusInternalServerError, "Error loading layouts") + Error(w, http.StatusInternalServerError, "Error loading layouts", h.Logger) return } diff --git a/server/mux.go b/server/mux.go index fbf6affc2..da04de6ed 100644 --- a/server/mux.go +++ b/server/mux.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/bouk/httprouter" - "github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch "github.com/influxdata/chronograf + 
"github.com/influxdata/chronograf" // When julienschmidt/httprouter v2 w/ context is out, switch "github.com/influxdata/chronograf/jwt" ) @@ -94,14 +94,13 @@ func NewMux(opts MuxOpts, service Service) http.Handler { router.DELETE("/chronograf/v1/layouts/:id", service.RemoveLayout) // Users - /* - router.GET("/chronograf/v1/users", Users) - router.POST("/chronograf/v1/users", NewUser) + router.GET("/chronograf/v1/me", service.Me) + router.POST("/chronograf/v1/users", service.NewUser) + + router.GET("/chronograf/v1/users/:id", service.UserID) + router.PATCH("/chronograf/v1/users/:id", service.UpdateUser) + router.DELETE("/chronograf/v1/users/:id", service.RemoveUser) - router.GET("/chronograf/v1/users/:id", UsersID) - router.PATCH("/chronograf/v1/users/:id", UpdateUser) - router.DELETE("/chronograf/v1/users/:id", RemoveUser) - */ // Explorations router.GET("/chronograf/v1/users/:id/explorations", service.Explorations) router.POST("/chronograf/v1/users/:id/explorations", service.NewExploration) @@ -133,7 +132,7 @@ func AuthAPI(opts MuxOpts, router *httprouter.Router) http.Handler { opts.Logger, ) - router.GET("/oauth", gh.Login()) + router.GET("/oauth/github", gh.Login()) router.GET("/oauth/logout", gh.Logout()) router.GET("/oauth/github/callback", gh.Callback()) @@ -152,44 +151,45 @@ func encodeJSON(w http.ResponseWriter, status int, v interface{}, logger chronog w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) if err := json.NewEncoder(w).Encode(v); err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, logger) } } // Error writes an JSON message -func Error(w http.ResponseWriter, code int, msg string) { - e := struct { - Code int `json:"code"` - Message string `json:"message"` - }{ +func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) { + e := ErrorMessage{ Code: code, Message: msg, } b, err := json.Marshal(e) if err != nil { - //log.Print("go-oidc: failed to marshal %#v: %v", e, err) code = http.StatusInternalServerError b = []byte(`{"code": 500, "message":"server_error"}`) } + + logger. + WithField("component", "server"). + WithField("http_status ", code). 
+ Error("Error message ", msg) w.Header().Set("Content-Type", JSONType) w.WriteHeader(code) w.Write(b) } -func invalidData(w http.ResponseWriter, err error) { - Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err)) +func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) { + Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("%v", err), logger) } -func invalidJSON(w http.ResponseWriter) { - Error(w, http.StatusBadRequest, "Unparsable JSON") +func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) { + Error(w, http.StatusBadRequest, "Unparsable JSON", logger) } -func unknownErrorWithMessage(w http.ResponseWriter, err error) { - Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err)) +func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) { + Error(w, http.StatusInternalServerError, fmt.Sprintf("Unknown error: %v", err), logger) } -func notFound(w http.ResponseWriter, id int) { - Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id)) +func notFound(w http.ResponseWriter, id int, logger chronograf.Logger) { + Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), logger) } func paramID(key string, r *http.Request) (int, error) { diff --git a/server/proxy.go b/server/proxy.go index ab12279fe..10f0052ed 100644 --- a/server/proxy.go +++ b/server/proxy.go @@ -26,30 +26,30 @@ type postProxyResponse struct { func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) { id, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } var req chronograf.Query if err = json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } if err = ValidProxyRequest(req); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } ctx := r.Context() src, err := h.SourcesStore.Get(ctx, id) if err != nil { - notFound(w, id) + notFound(w, id, h.Logger) return } if err = h.TimeSeries.Connect(ctx, &src); err != nil { msg := fmt.Sprintf("Unable to connect to source %d", id) - Error(w, http.StatusBadRequest, msg) + Error(w, http.StatusBadRequest, msg, h.Logger) return } @@ -57,11 +57,11 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) { if err != nil { if err == chronograf.ErrUpstreamTimeout { msg := "Timeout waiting for Influx response" - Error(w, http.StatusRequestTimeout, msg) + Error(w, http.StatusRequestTimeout, msg, h.Logger) return } // TODO: Here I want to return the error code from influx. 
- Error(w, http.StatusBadRequest, err.Error()) + Error(w, http.StatusBadRequest, err.Error(), h.Logger) return } @@ -75,33 +75,33 @@ func (h *Service) Proxy(w http.ResponseWriter, r *http.Request) { func (h *Service) KapacitorProxy(w http.ResponseWriter, r *http.Request) { srcID, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } id, err := paramID("kid", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } path := r.URL.Query().Get("path") if path == "" { - Error(w, http.StatusUnprocessableEntity, "path query parameter required") + Error(w, http.StatusUnprocessableEntity, "path query parameter required", h.Logger) return } ctx := r.Context() srv, err := h.ServersStore.Get(ctx, id) if err != nil || srv.SrcID != srcID { - notFound(w, id) + notFound(w, id, h.Logger) return } u, err := url.Parse(srv.URL) if err != nil { msg := fmt.Sprintf("Error parsing kapacitor url: %v", err) - Error(w, http.StatusUnprocessableEntity, msg) + Error(w, http.StatusUnprocessableEntity, msg, h.Logger) return } diff --git a/server/routes.go b/server/routes.go index 7b0fba244..4c256e794 100644 --- a/server/routes.go +++ b/server/routes.go @@ -11,6 +11,7 @@ type getRoutesResponse struct { Mappings string `json:"mappings"` // Location of the application mappings endpoint Sources string `json:"sources"` // Location of the sources endpoint Users string `json:"users"` // Location of the users endpoint + Me string `json:"me"` // Location of the me endpoint } // AllRoutes returns all top level routes within chronograf @@ -19,6 +20,7 @@ func AllRoutes(logger chronograf.Logger) http.HandlerFunc { Sources: "/chronograf/v1/sources", Layouts: "/chronograf/v1/layouts", Users: "/chronograf/v1/users", + Me: "/chronograf/v1/me", Mappings: "/chronograf/v1/mappings", } diff --git a/server/server.go b/server/server.go index b5d76d54b..d4d6cf5f3 100644 --- a/server/server.go +++ b/server/server.go @@ -47,6 +47,7 @@ type Server struct { handler http.Handler } +// BuildInfo is sent to the usage client to track versions and commits type BuildInfo struct { Version string Commit string @@ -59,7 +60,7 @@ func (s *Server) useAuth() bool { // Serve starts and runs the chronograf server func (s *Server) Serve() error { logger := clog.New(clog.ParseLevel(s.LogLevel)) - service := openService(s.BoltPath, s.CannedPath, logger) + service := openService(s.BoltPath, s.CannedPath, logger, s.useAuth()) s.handler = NewMux(MuxOpts{ Develop: s.Develop, TokenSecret: s.TokenSecret, @@ -105,7 +106,7 @@ func (s *Server) Serve() error { return nil } -func openService(boltPath, cannedPath string, logger chronograf.Logger) Service { +func openService(boltPath, cannedPath string, logger chronograf.Logger, useAuth bool) Service { db := bolt.NewClient() db.Path = boltPath if err := db.Open(); err != nil { @@ -136,11 +137,14 @@ func openService(boltPath, cannedPath string, logger chronograf.Logger) Service ExplorationStore: db.ExplorationStore, SourcesStore: db.SourcesStore, ServersStore: db.ServersStore, + UsersStore: db.UsersStore, TimeSeries: &influx.Client{ Logger: logger, }, LayoutStore: layouts, AlertRulesStore: db.AlertsStore, + Logger: logger, + UseAuth: useAuth, } } diff --git a/server/service.go b/server/service.go index 198b342a7..f4e87e351 100644 --- a/server/service.go +++ b/server/service.go @@ -9,6 +9,14 @@ type Service struct { 
ServersStore chronograf.ServersStore LayoutStore chronograf.LayoutStore AlertRulesStore chronograf.AlertRulesStore + UsersStore chronograf.UsersStore TimeSeries chronograf.TimeSeries Logger chronograf.Logger + UseAuth bool +} + +// ErrorMessage is the error response format for all service errors +type ErrorMessage struct { + Code int `json:"code"` + Message string `json:"message"` } diff --git a/server/sources.go b/server/sources.go index e4c790dc8..2ba2d29e5 100644 --- a/server/sources.go +++ b/server/sources.go @@ -21,6 +21,11 @@ type sourceResponse struct { } func newSourceResponse(src chronograf.Source) sourceResponse { + // If telegraf is not set, we'll set it to the default value. + if src.Telegraf == "" { + src.Telegraf = "telegraf" + } + httpAPISrcs := "/chronograf/v1/sources" return sourceResponse{ Source: src, @@ -36,18 +41,24 @@ func newSourceResponse(src chronograf.Source) sourceResponse { func (h *Service) NewSource(w http.ResponseWriter, r *http.Request) { var src chronograf.Source if err := json.NewDecoder(r.Body).Decode(&src); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } + if err := ValidSourceRequest(src); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } + // By default the telegraf database will be telegraf + if src.Telegraf == "" { + src.Telegraf = "telegraf" + } + var err error if src, err = h.SourcesStore.Add(r.Context(), src); err != nil { msg := fmt.Errorf("Error storing source %v: %v", src, err) - unknownErrorWithMessage(w, msg) + unknownErrorWithMessage(w, msg, h.Logger) return } @@ -65,7 +76,7 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) { ctx := r.Context() srcs, err := h.SourcesStore.All(ctx) if err != nil { - Error(w, http.StatusInternalServerError, "Error loading sources") + Error(w, http.StatusInternalServerError, "Error loading sources", h.Logger) return } @@ -84,14 +95,14 @@ func (h *Service) Sources(w http.ResponseWriter, r *http.Request) { func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) { id, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() src, err := h.SourcesStore.Get(ctx, id) if err != nil { - notFound(w, id) + notFound(w, id, h.Logger) return } @@ -103,14 +114,14 @@ func (h *Service) SourcesID(w http.ResponseWriter, r *http.Request) { func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) { id, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } src := chronograf.Source{ID: id} ctx := r.Context() if err = h.SourcesStore.Delete(ctx, src); err != nil { - unknownErrorWithMessage(w, err) + unknownErrorWithMessage(w, err, h.Logger) return } @@ -121,20 +132,20 @@ func (h *Service) RemoveSource(w http.ResponseWriter, r *http.Request) { func (h *Service) UpdateSource(w http.ResponseWriter, r *http.Request) { id, err := paramID("id", r) if err != nil { - Error(w, http.StatusUnprocessableEntity, err.Error()) + Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger) return } ctx := r.Context() src, err := h.SourcesStore.Get(ctx, id) if err != nil { - notFound(w, id) + notFound(w, id, h.Logger) return } var req chronograf.Source if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - invalidJSON(w) + invalidJSON(w, h.Logger) return } @@ -154,15 +165,18 @@ func (h *Service) 
UpdateSource(w http.ResponseWriter, r *http.Request) { if req.Type != "" { src.Type = req.Type } + if req.Telegraf != "" { + src.Telegraf = req.Telegraf + } if err := ValidSourceRequest(src); err != nil { - invalidData(w, err) + invalidData(w, err, h.Logger) return } if err := h.SourcesStore.Update(ctx, src); err != nil { msg := fmt.Sprintf("Error updating source ID %d", id) - Error(w, http.StatusInternalServerError, msg) + Error(w, http.StatusInternalServerError, msg, h.Logger) return } encodeJSON(w, http.StatusOK, newSourceResponse(src), h.Logger) diff --git a/server/sources_test.go b/server/sources_test.go new file mode 100644 index 000000000..290fa0a89 --- /dev/null +++ b/server/sources_test.go @@ -0,0 +1,58 @@ +package server + +import ( + "reflect" + "testing" + + "github.com/influxdata/chronograf" +) + +func Test_newSourceResponse(t *testing.T) { + tests := []struct { + name string + src chronograf.Source + want sourceResponse + }{ + { + name: "Test empty telegraf", + src: chronograf.Source{ + ID: 1, + Telegraf: "", + }, + want: sourceResponse{ + Source: chronograf.Source{ + ID: 1, + Telegraf: "telegraf", + }, + Links: sourceLinks{ + Self: "/chronograf/v1/sources/1", + Proxy: "/chronograf/v1/sources/1/proxy", + Kapacitors: "/chronograf/v1/sources/1/kapacitors", + }, + }, + }, + { + name: "Test non-default telegraf", + src: chronograf.Source{ + ID: 1, + Telegraf: "howdy", + }, + want: sourceResponse{ + Source: chronograf.Source{ + ID: 1, + Telegraf: "howdy", + }, + Links: sourceLinks{ + Self: "/chronograf/v1/sources/1", + Proxy: "/chronograf/v1/sources/1/proxy", + Kapacitors: "/chronograf/v1/sources/1/kapacitors", + }, + }, + }, + } + for _, tt := range tests { + if got := newSourceResponse(tt.src); !reflect.DeepEqual(got, tt.want) { + t.Errorf("%q. 
newSourceResponse() = %v, want %v", tt.name, got, tt.want) + } + } +} diff --git a/server/swagger.json b/server/swagger.json index 164f7b211..b20e5bc93 100644 --- a/server/swagger.json +++ b/server/swagger.json @@ -57,14 +57,16 @@ }, "post": { "summary": "Create new data source", - "parameters": [{ - "name": "source", - "in": "body", - "description": "Configuration options for data source", - "schema": { - "$ref": "#/definitions/Source" + "parameters": [ + { + "name": "source", + "in": "body", + "description": "Configuration options for data source", + "schema": { + "$ref": "#/definitions/Source" + } } - }], + ], "responses": { "201": { "description": "Successfully create data source", @@ -90,13 +92,15 @@ }, "/sources/{id}": { "get": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the data source", + "required": true + } + ], "summary": "Configured data sources", "description": "These data sources store time series data.", "responses": { @@ -122,21 +126,24 @@ }, "patch": { "summary": "Update data source configuration", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of a data source", - "required": true - }, { - "name": "config", - "in": "body", - "description": "data source configuration", - "schema": { - "$ref": "#/definitions/Source" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of a data source", + "required": true }, - "required": true - }], + { + "name": "config", + "in": "body", + "description": "data source configuration", + "schema": { + "$ref": "#/definitions/Source" + }, + "required": true + } + ], "responses": { "200": { "description": "Data source's configuration was changed", @@ -159,13 +166,15 @@ } }, "delete": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + } + ], "summary": "This specific data source will be removed from the data store", "responses": { "204": { @@ -189,21 +198,24 @@ "/sources/{id}/proxy": { "post": { "description": "Query the backend time series data source and return the response according to `format`", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the data source", - "required": true - }, { - "name": "query", - "in": "body", - "description": "Query Parameters", - "schema": { - "$ref": "#/definitions/Proxy" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the data source", + "required": true }, - "required": true - }], + { + "name": "query", + "in": "body", + "description": "Query Parameters", + "schema": { + "$ref": "#/definitions/Proxy" + }, + "required": true + } + ], "responses": { "200": { "description": "Result of the query from the backend time series data source.", @@ -258,14 +270,16 @@ }, "post": { "summary": "Create new user for this data source", - "parameters": [{ - "name": "user", - "in": "body", - "description": "Configuration options for new user", - "schema": { - "$ref": "#/definitions/User" + "parameters": [ + { + "name": "user", + "in": "body", + "description": "Configuration options for new user", + "schema": { + "$ref": "#/definitions/User" + } } 
- }], + ], "responses": { "201": { "description": "Successfully created new user", @@ -291,13 +305,15 @@ }, "/users/{user_id}": { "get": { - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - }], + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of the specific user", + "required": true + } + ], "summary": "Returns information about a specific user", "description": "Specific User.\n", "responses": { @@ -323,21 +339,24 @@ }, "patch": { "summary": "Update user configuration", - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - }, { - "name": "config", - "in": "body", - "description": "user configuration", - "schema": { - "$ref": "#/definitions/User" + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of the specific user", + "required": true }, - "required": true - }], + { + "name": "config", + "in": "body", + "description": "user configuration", + "schema": { + "$ref": "#/definitions/User" + }, + "required": true + } + ], "responses": { "200": { "description": "Users's configuration was changed", @@ -360,13 +379,15 @@ } }, "delete": { - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of the specific user", - "required": true - }], + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of the specific user", + "required": true + } + ], "summary": "This specific user will be removed from the data store", "responses": { "204": { @@ -389,13 +410,15 @@ }, "/users/{user_id}/explorations": { "get": { - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "All Data Explorations returned only for this user.", - "required": true - }], + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "All Data Explorations returned only for this user.", + "required": true + } + ], "responses": { "200": { "description": "Data Explorations saved sessions for user are returned.", @@ -419,20 +442,23 @@ }, "post": { "summary": "Create new named exploration for this user", - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of user to associate this exploration with.", - "required": true - }, { - "name": "exploration", - "in": "body", - "description": "Exploration session to save", - "schema": { - "$ref": "#/definitions/Exploration" + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of user to associate this exploration with.", + "required": true + }, + { + "name": "exploration", + "in": "body", + "description": "Exploration session to save", + "schema": { + "$ref": "#/definitions/Exploration" + } } - }], + ], "responses": { "201": { "description": "Successfully created new Exploration session", @@ -464,19 +490,22 @@ }, "/users/{user_id}/explorations/{exploration_id}": { "get": { - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of user to associate this exploration with.", - "required": true - }, { - "name": "exploration_id", - "in": "path", - "type": "string", - "description": "ID of the specific exploration.", - "required": true - }], + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of 
user to associate this exploration with.", + "required": true + }, + { + "name": "exploration_id", + "in": "path", + "type": "string", + "description": "ID of the specific exploration.", + "required": true + } + ], "summary": "Returns the specified data exploration session", "description": "A data exploration session specifies query information.\n", "responses": { @@ -502,27 +531,31 @@ }, "patch": { "summary": "Update exploration configuration", - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of user", - "required": true - }, { - "name": "exploration_id", - "in": "path", - "type": "string", - "description": "ID of the specific exploration.", - "required": true - }, { - "name": "exploration", - "in": "body", - "description": "Update the exploration information to this.", - "required": true, - "schema": { - "$ref": "#/definitions/Exploration" + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of user", + "required": true + }, + { + "name": "exploration_id", + "in": "path", + "type": "string", + "description": "ID of the specific exploration.", + "required": true + }, + { + "name": "exploration", + "in": "body", + "description": "Update the exploration information to this.", + "required": true, + "schema": { + "$ref": "#/definitions/Exploration" + } } - }], + ], "responses": { "200": { "description": "Exploration's configuration was changed", @@ -545,19 +578,22 @@ } }, "delete": { - "parameters": [{ - "name": "user_id", - "in": "path", - "type": "string", - "description": "ID of user to associate this exploration with.", - "required": true - }, { - "name": "exploration_id", - "in": "path", - "type": "string", - "description": "ID of the specific exploration.", - "required": true - }], + "parameters": [ + { + "name": "user_id", + "in": "path", + "type": "string", + "description": "ID of user to associate this exploration with.", + "required": true + }, + { + "name": "exploration_id", + "in": "path", + "type": "string", + "description": "ID of the specific exploration.", + "required": true + } + ], "summary": "This specific exporer session will be removed.", "responses": { "204": { @@ -580,13 +616,15 @@ }, "/sources/{id}/kapacitors": { "get": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + } + ], "summary": "Configured kapacitors", "responses": { "200": { @@ -605,20 +643,23 @@ }, "post": { "summary": "Create new kapacitor backend", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapacitor", - "in": "body", - "description": "Configuration options for kapacitor", - "schema": { - "$ref": "#/definitions/Kapacitor" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapacitor", + "in": "body", + "description": "Configuration options for kapacitor", + "schema": { + "$ref": "#/definitions/Kapacitor" + } } - }], + ], "responses": { "201": { "description": "Successfully created kapacitor source", @@ -644,19 +685,22 @@ }, "/sources/{id}/kapacitors/{kapa_id}": { "get": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { 
- "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor", + "required": true + } + ], "summary": "Configured kapacitors", "description": "These kapacitors are used for monitoring and alerting.", "responses": { @@ -682,27 +726,31 @@ }, "patch": { "summary": "Update kapacitor configuration", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of a kapacitor backend", - "required": true - }, { - "name": "config", - "in": "body", - "description": "kapacitor configuration", - "schema": { - "$ref": "#/definitions/Kapacitor" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true }, - "required": true - }], + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of a kapacitor backend", + "required": true + }, + { + "name": "config", + "in": "body", + "description": "kapacitor configuration", + "schema": { + "$ref": "#/definitions/Kapacitor" + }, + "required": true + } + ], "responses": { "200": { "description": "Kapacitor's configuration was changed", @@ -725,19 +773,22 @@ } }, "delete": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor", + "required": true + } + ], "summary": "This specific kapacitor will be removed.", "responses": { "204": { @@ -761,19 +812,22 @@ "/sources/{id}/kapacitors/{kapa_id}/tasks": { "get": { "description": "Get all defined alert tasks.", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + } + ], "responses": { "200": { "description": "All alert tasks for this specific kapacitor are returned", @@ -797,27 +851,31 @@ }, "post": { "description": "Create kapacitor alert task", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, { - "name": "task", - "in": "body", - "description": "Rule to generate alert task", - "schema": { - "$ref": "#/definitions/Task" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true }, - "required": true - }], + { 
+ "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + }, + { + "name": "task", + "in": "body", + "description": "Rule to generate alert task", + "schema": { + "$ref": "#/definitions/Task" + }, + "required": true + } + ], "responses": { "201": { "description": "Successfully created new kapacitor alert task", @@ -849,25 +907,29 @@ }, "/sources/{id}/kapacitors/{kapa_id}/tasks/{task_id}": { "get": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor", - "required": true - }, { - "name": "task_id", - "in": "path", - "type": "string", - "description": "ID of the task", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor", + "required": true + }, + { + "name": "task_id", + "in": "path", + "type": "string", + "description": "ID of the task", + "required": true + } + ], "summary": "Specific kapacitor alert task", "description": "Alerting task for kapacitor", "responses": { @@ -893,26 +955,28 @@ }, "put": { "summary": "Update rule alert task configuration", - "parameters": [{ + "parameters": [ + { "name": "id", "in": "path", "type": "string", "description": "ID of the source", "required": true - }, { + }, + { "name": "kapa_id", "in": "path", "type": "string", "description": "ID of a kapacitor backend", "required": true - }, { + }, + { "name": "task_id", "in": "path", "type": "string", "description": "ID of a task", "required": true }, - { "name": "task", "in": "body", @@ -945,26 +1009,28 @@ } }, "delete": { - "parameters": [{ + "parameters": [ + { "name": "id", "in": "path", "type": "string", "description": "ID of the source", "required": true - }, { + }, + { "name": "kapa_id", "in": "path", "type": "string", "description": "ID of the kapacitor", "required": true - }, { + }, + { "name": "task_id", "in": "path", "type": "string", "description": "ID of the task", "required": true } - ], "summary": "This specific alert task will be removed.", "responses": { @@ -989,25 +1055,29 @@ "/sources/{id}/kapacitors/{kapa_id}/proxy": { "get": { "description": "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, { - "name": "path", - "in": "query", - "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + }, + { + "name": "path", + "in": "query", + "type": "string", + "description": "The kapacitor API path to use in the proxy redirect", + "required": true + } + ], "responses": { "204": { "description": "Kapacitor returned no content" @@ -1028,25 +1098,29 @@ }, "delete": { "description": "DELETE to `path` of kapacitor. 
The response and status code from kapacitor is directly returned.", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, { - "name": "path", - "in": "query", - "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true + }, + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + }, + { + "name": "path", + "in": "query", + "type": "string", + "description": "The kapacitor API path to use in the proxy redirect", + "required": true + } + ], "responses": { "204": { "description": "Kapacitor returned no content" @@ -1067,33 +1141,38 @@ }, "patch": { "description": "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, { - "name": "path", - "in": "query", - "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", - "required": true - }, { - "name": "query", - "in": "body", - "description": "Kapacitor body", - "schema": { - "$ref": "#/definitions/KapacitorProxy" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true }, - "required": true - }], + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + }, + { + "name": "path", + "in": "query", + "type": "string", + "description": "The kapacitor API path to use in the proxy redirect", + "required": true + }, + { + "name": "query", + "in": "body", + "description": "Kapacitor body", + "schema": { + "$ref": "#/definitions/KapacitorProxy" + }, + "required": true + } + ], "responses": { "204": { "description": "Kapacitor returned no content" @@ -1114,33 +1193,38 @@ }, "post": { "description": "POST body directly to configured kapacitor. 
The response and status code from kapacitor is directly returned.", - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the source", - "required": true - }, { - "name": "kapa_id", - "in": "path", - "type": "string", - "description": "ID of the kapacitor backend.", - "required": true - }, { - "name": "path", - "in": "query", - "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", - "required": true - }, { - "name": "query", - "in": "body", - "description": "Kapacitor body", - "schema": { - "$ref": "#/definitions/KapacitorProxy" + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the source", + "required": true }, - "required": true - }], + { + "name": "kapa_id", + "in": "path", + "type": "string", + "description": "ID of the kapacitor backend.", + "required": true + }, + { + "name": "path", + "in": "query", + "type": "string", + "description": "The kapacitor API path to use in the proxy redirect", + "required": true + }, + { + "name": "query", + "in": "body", + "description": "Kapacitor body", + "schema": { + "$ref": "#/definitions/KapacitorProxy" + }, + "required": true + } + ], "responses": { "204": { "description": "Kapacitor returned no content" @@ -1183,27 +1267,30 @@ "/layouts": { "get": { "summary": "Pre-configured layouts", - "parameters": [{ - "name": "measurement", - "in": "query", - "description": "Returns layouts with this measurement", - "required": false, - "type": "array", - "items": { - "type": "string" + "parameters": [ + { + "name": "measurement", + "in": "query", + "description": "Returns layouts with this measurement", + "required": false, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" }, - "collectionFormat": "multi" - }, { - "name": "app", - "in": "query", - "description": "Returns layouts with this app", - "required": false, - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi" - }], + { + "name": "app", + "in": "query", + "description": "Returns layouts with this app", + "required": false, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], "description": "Layouts are a collection of `Cells` that visualize time-series data.\n", "responses": { "200": { @@ -1222,14 +1309,16 @@ }, "post": { "summary": "Create new layout", - "parameters": [{ - "name": "layout", - "in": "body", - "description": "Defines the layout and queries of the cells within the layout.", - "schema": { - "$ref": "#/definitions/Layout" + "parameters": [ + { + "name": "layout", + "in": "body", + "description": "Defines the layout and queries of the cells within the layout.", + "schema": { + "$ref": "#/definitions/Layout" + } } - }], + ], "responses": { "201": { "description": "Successfully created new layout", @@ -1255,13 +1344,15 @@ }, "/layouts/{id}": { "get": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the layout", - "required": true - }], + "parameters": [ + { + "name": "id", + "in": "path", + "type": "string", + "description": "ID of the layout", + "required": true + } + ], "summary": "Specific pre-configured layout containing cells and queries.", "description": "layouts will hold information about how to layout the page of graphs.\n", "responses": { @@ -1286,13 +1377,15 @@ } }, "delete": { - "parameters": [{ - "name": "id", - "in": "path", - "type": "string", - "description": "ID of the layout", - "required": 
true
-      }],
+      "parameters": [
+        {
+          "name": "id",
+          "in": "path",
+          "type": "string",
+          "description": "ID of the layout",
+          "required": true
+        }
+      ],
      "summary": "This specific layout will be removed from the data store",
      "responses": {
        "204": {
@@ -1314,21 +1407,24 @@
      },
      "put": {
        "summary": "Replace layout configuration.",
-        "parameters": [{
-          "name": "id",
-          "in": "path",
-          "type": "string",
-          "description": "ID of a layout",
-          "required": true
-        }, {
-          "name": "config",
-          "in": "body",
-          "description": "layout configuration update parameters",
-          "schema": {
-            "$ref": "#/definitions/Layout"
+        "parameters": [
+          {
+            "name": "id",
+            "in": "path",
+            "type": "string",
+            "description": "ID of a layout",
+            "required": true
          },
-          "required": true
-        }],
+          {
+            "name": "config",
+            "in": "body",
+            "description": "layout configuration update parameters",
+            "schema": {
+              "$ref": "#/definitions/Layout"
+            },
+            "required": true
+          }
+        ],
        "responses": {
          "200": {
            "description": "Layout has been replaced and the new layout is returned.",
@@ -1481,6 +1577,11 @@
          "type": "boolean",
          "description": "Indicates whether this source is the default source"
        },
+        "telegraf": {
+          "type": "string",
+          "description": "Database where telegraf information is stored for this source",
+          "default": "telegraf"
+        },
        "links": {
          "type": "object",
          "properties": {
@@ -1711,7 +1812,32 @@
          "description": "Time-series data queries for Cell.",
          "type": "array",
          "items": {
-            "$ref": "#/definitions/Proxy"
+            "$ref": "#/definitions/LayoutQuery"
+          }
+        }
+      }
+    },
+    "LayoutQuery": {
+      "type": "object",
+      "required": [
+        "query"
+      ],
+      "properties": {
+        "query": {
+          "type": "string"
+        },
+        "wheres": {
+          "description": "Defines the condition clauses for influxdb",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "groupbys": {
+          "description": "Defines the group by clauses for influxdb",
+          "type": "array",
+          "items": {
+            "type": "string"
          }
        }
      }
@@ -1783,4 +1909,4 @@
    }
  }
}
-}
+}
\ No newline at end of file
diff --git a/server/users.go b/server/users.go
new file mode 100644
index 000000000..ad2450f17
--- /dev/null
+++ b/server/users.go
@@ -0,0 +1,180 @@
+package server
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/context"
+
+	"github.com/influxdata/chronograf"
+)
+
+type userLinks struct {
+	Self         string `json:"self"`         // Self link mapping to this resource
+	Explorations string `json:"explorations"` // URL for explorations endpoint
+}
+
+type userResponse struct {
+	*chronograf.User
+	Links userLinks `json:"links"`
+}
+
+func newUserResponse(usr *chronograf.User) userResponse {
+	base := "/chronograf/v1/users"
+	return userResponse{
+		User: usr,
+		Links: userLinks{
+			Self:         fmt.Sprintf("%s/%d", base, usr.ID),
+			Explorations: fmt.Sprintf("%s/%d/explorations", base, usr.ID),
+		},
+	}
+}
+
+// NewUser adds a new valid user to the store
+func (h *Service) NewUser(w http.ResponseWriter, r *http.Request) {
+	usr := &chronograf.User{}
+	if err := json.NewDecoder(r.Body).Decode(usr); err != nil {
+		invalidJSON(w, h.Logger)
+		return
+	}
+	if err := ValidUserRequest(usr); err != nil {
+		invalidData(w, err, h.Logger)
+		return
+	}
+
+	var err error
+	if usr, err = h.UsersStore.Add(r.Context(), usr); err != nil {
+		msg := fmt.Errorf("error storing user %v: %v", *usr, err)
+		unknownErrorWithMessage(w, msg, h.Logger)
+		return
+	}
+
+	res := newUserResponse(usr)
+	w.Header().Add("Location", res.Links.Self)
+	encodeJSON(w, http.StatusCreated, res, h.Logger)
+}
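+
+// Editor's note, not part of this change: a quick way to exercise this
+// handler, assuming chronograf is listening on its default localhost:8888:
+//
+//	curl -i -X POST http://localhost:8888/chronograf/v1/users \
+//	    -d '{"email": "foo@example.com"}'
+//
+// A successful request answers 201 Created with a Location header such as
+// /chronograf/v1/users/1 and the stored user (plus its links) as JSON.
+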
+// UserID retrieves a user from the store
+func (h *Service) UserID(w http.ResponseWriter, r *http.Request) {
+	id, err := paramID("id", r)
+	if err != nil {
+		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+		return
+	}
+
+	ctx := r.Context()
+	usr, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
+	if err != nil {
+		notFound(w, id, h.Logger)
+		return
+	}
+
+	res := newUserResponse(usr)
+	encodeJSON(w, http.StatusOK, res, h.Logger)
+}
+
+// RemoveUser deletes the user from the store
+func (h *Service) RemoveUser(w http.ResponseWriter, r *http.Request) {
+	id, err := paramID("id", r)
+	if err != nil {
+		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+		return
+	}
+
+	usr := &chronograf.User{ID: chronograf.UserID(id)}
+	ctx := r.Context()
+	if err = h.UsersStore.Delete(ctx, usr); err != nil {
+		unknownErrorWithMessage(w, err, h.Logger)
+		return
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// UpdateUser handles incremental updates of a user
+func (h *Service) UpdateUser(w http.ResponseWriter, r *http.Request) {
+	id, err := paramID("id", r)
+	if err != nil {
+		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
+		return
+	}
+
+	ctx := r.Context()
+	usr, err := h.UsersStore.Get(ctx, chronograf.UserID(id))
+	if err != nil {
+		notFound(w, id, h.Logger)
+		return
+	}
+
+	var req chronograf.User
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		invalidJSON(w, h.Logger)
+		return
+	}
+
+	usr.Email = req.Email
+	if err := ValidUserRequest(usr); err != nil {
+		invalidData(w, err, h.Logger)
+		return
+	}
+
+	if err := h.UsersStore.Update(ctx, usr); err != nil {
+		msg := fmt.Sprintf("Error updating user ID %d", id)
+		Error(w, http.StatusInternalServerError, msg, h.Logger)
+		return
+	}
+	encodeJSON(w, http.StatusOK, newUserResponse(usr), h.Logger)
+}
+
+// ValidUserRequest checks if email is nonempty
+func ValidUserRequest(s *chronograf.User) error {
+	// email is required
+	if s.Email == "" {
+		return fmt.Errorf("Email required")
+	}
+	return nil
+}
+
+func getEmail(ctx context.Context) (string, error) {
+	principal, ok := ctx.Value(chronograf.PrincipalKey).(chronograf.Principal)
+	if !ok || principal == "" {
+		return "", fmt.Errorf("Token not found")
+	}
+	return string(principal), nil
+}
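+
+// Editor's note, not part of this change: getEmail assumes that an auth
+// middleware has already stored a chronograf.Principal in the request
+// context before this handler runs. A minimal, hypothetical sketch of such
+// a middleware (withPrincipal is illustrative, not the real one):
+//
+//	func withPrincipal(p chronograf.Principal, next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			ctx := context.WithValue(r.Context(), chronograf.PrincipalKey, p)
+//			next.ServeHTTP(w, r.WithContext(ctx))
+//		})
+//	}
+//
+// If no principal is present, the comma-ok assertion above fails cleanly
+// instead of panicking.
+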
+// Me does a findOrCreate based on the email in the context
+func (h *Service) Me(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	if !h.UseAuth {
+		// The UI interprets 418 as "auth disabled" (see AUTH_DISABLED in ui/src/index.js).
+		Error(w, http.StatusTeapot, "authentication is disabled; no current user", h.Logger)
+		return
+	}
+	email, err := getEmail(ctx)
+	if err != nil {
+		invalidData(w, err, h.Logger)
+		return
+	}
+	usr, err := h.UsersStore.FindByEmail(ctx, email)
+	if err == nil {
+		res := newUserResponse(usr)
+		encodeJSON(w, http.StatusOK, res, h.Logger)
+		return
+	}
+
+	// We didn't find a user, so create a new one
+	user := &chronograf.User{
+		Email: email,
+	}
+	user, err = h.UsersStore.Add(ctx, user)
+	if err != nil {
+		msg := fmt.Errorf("error storing user %v: %v", user, err)
+		unknownErrorWithMessage(w, msg, h.Logger)
+		return
+	}
+
+	res := newUserResponse(user)
+	encodeJSON(w, http.StatusOK, res, h.Logger)
+}
diff --git a/ui/spec/shared/components/PermissionsTableSpec.js b/ui/spec/shared/components/PermissionsTableSpec.js
deleted file mode 100644
index 6251dbdc4..000000000
--- a/ui/spec/shared/components/PermissionsTableSpec.js
+++ /dev/null
@@ -1,68 +0,0 @@
-import PermissionsTable from 'src/shared/components/PermissionsTable';
-import React from 'react';
-import {shallow} from 'enzyme';
-import sinon from 'sinon';
-
-describe('Shared.Components.PermissionsTable', function() {
-  it('renders a row for each permission', function() {
-    const permissions = [
-      {name: 'ViewChronograf', displayName: 'View Chronograf', description: 'Can use Chronograf tools', resources: ['db1']},
-      {name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']},
-    ];
-
-    const wrapper = shallow(
-    );
-
-    expect(wrapper.find('tr').length).to.equal(2);
-    expect(wrapper.find('table').text()).to.match(/View Chronograf/);
-    expect(wrapper.find('table').text()).to.match(/db1/);
-    expect(wrapper.find('table').text()).to.match(/Read/);
-    expect(wrapper.find('table').text()).to.match(/All Databases/);
-  });
-
-  it('only renders the control to add a resource when specified', function() {
-    const wrapper = shallow(
-    );
-
-    expect(wrapper.find('.pill-add').length).to.equal(0);
-  });
-
-  it('only renders the "Remove" control when a callback is provided', function() {
-    const wrapper = shallow(
-    );
-
-    expect(wrapper.find('.remove-permission').length).to.equal(0);
-  });
-
-  describe('when a user clicks "Remove"', function() {
-    it('fires a callback', function() {
-      const permission = {name: 'Read', displayName: 'Read', description: 'Can read data', resources: ['']};
-      const cb = sinon.spy();
-      const wrapper = shallow(
-      );
-
-      wrapper.find('button[children="Remove"]').at(0).simulate('click');
-
-      expect(cb.calledWith(permission)).to.be.true;
-    });
-  });
-});
diff --git a/ui/spec/utils/timeSeriesToDygraphSpec.js b/ui/spec/utils/timeSeriesToDygraphSpec.js
new file mode 100644
index 000000000..8949597fd
--- /dev/null
+++ b/ui/spec/utils/timeSeriesToDygraphSpec.js
@@ -0,0 +1,88 @@
+import timeSeriesToDygraph from 'src/utils/timeSeriesToDygraph';
+
+describe('timeSeriesToDygraph', () => {
+  it('parses a raw InfluxDB response into a dygraph friendly data format', () => {
+    const influxResponse = [
+      {
+        "response":
+        {
+          "results": [
+            {
+              "series": [
+                {
+                  "name":"m1",
+                  "columns": ["time","f1"],
+                  "values": [[1000, 1],[2000, 2]],
+                },
+              ]
+            },
+            {
+              "series": [
+                {
+                  "name":"m1",
+                  "columns": ["time","f2"],
+                  "values": [[2000, 3],[4000, 4]],
+                },
+              ]
+            },
+          ],
+        },
+      }
+    ];
+
+    const actual = timeSeriesToDygraph(influxResponse);
+
+    const expected = {
+      fields: [
+        'time',
+        `m1.f1`,
+        `m1.f2`,
+      ],
+      timeSeries: [
+        [new Date(1000), 1, null],
+        [new Date(2000), 2, 3],
+        [new Date(4000), null, 4],
+      ],
+    };
+
+    expect(actual).to.deep.equal(expected);
+  });
+
+  it('can sort numerical
timestamps correctly', () => { + const influxResponse = [ + { + "response": + { + "results": [ + { + "series": [ + { + "name":"m1", + "columns": ["time","f1"], + "values": [[100, 1],[3000, 3],[200, 2]], + }, + ] + }, + ], + }, + } + ]; + + + const actual = timeSeriesToDygraph(influxResponse); + + const expected = { + fields: [ + 'time', + 'm1.f1', + ], + timeSeries: [ + [new Date(100), 1], + [new Date(200), 2], + [new Date(3000), 3], + ], + }; + + expect(actual).to.deep.equal(expected); + }); +}); diff --git a/ui/src/CheckSources.js b/ui/src/CheckSources.js index 7b64b30bd..b5a436051 100644 --- a/ui/src/CheckSources.js +++ b/ui/src/CheckSources.js @@ -53,7 +53,7 @@ const CheckSources = React.createClass({ const {isFetching, sources} = nextState; const source = sources.find((s) => s.id === params.sourceID); if (!isFetching && !source) { - return router.push(`/?redirectPath=${location.pathname}`); + return router.push(`/sources/new?redirectPath=${location.pathname}`); } if (!isFetching && !location.pathname.includes("/manage-sources")) { diff --git a/ui/src/alerts/containers/AlertsApp.js b/ui/src/alerts/containers/AlertsApp.js index 19fa968f6..ed336c911 100644 --- a/ui/src/alerts/containers/AlertsApp.js +++ b/ui/src/alerts/containers/AlertsApp.js @@ -1,9 +1,9 @@ import React, {PropTypes} from 'react'; -import {Link} from 'react-router'; import AlertsTable from '../components/AlertsTable'; import {getAlerts} from '../apis'; import AJAX from 'utils/ajax'; import _ from 'lodash'; +import NoKapacitorError from '../../shared/components/NoKapacitorError'; // Kevin: because we were getting strange errors saying // "Failed prop type: Required prop `source` was not specified in `AlertsApp`." @@ -83,16 +83,10 @@ const AlertsApp = React.createClass({ const {source} = this.props; if (this.state.hasKapacitor) { component = ( - + ); } else { - const path = `/sources/${source.id}/kapacitor-config`; - component = ( -
-        <div>
-          <p>The current source does not have an associated Kapacitor instance, please configure one.</p>
-          <Link to={path}>Add Kapacitor</Link>
-        </div>
-      );
+      component = <NoKapacitorError source={source} />;
    }
  }
  return component;
diff --git a/ui/src/auth/Login.js b/ui/src/auth/Login.js
new file mode 100644
index 000000000..aaa0b8e85
--- /dev/null
+++ b/ui/src/auth/Login.js
@@ -0,0 +1,12 @@
+import React from 'react';
+import {withRouter} from 'react-router';
+
+const Login = React.createClass({
+  render() {
+    return (
+      Click me to log in
+    );
+  },
+});
+
+export default withRouter(Login);
diff --git a/ui/src/auth/index.js b/ui/src/auth/index.js
new file mode 100644
index 000000000..488f89aec
--- /dev/null
+++ b/ui/src/auth/index.js
@@ -0,0 +1,2 @@
+import Login from './Login';
+export {Login};
diff --git a/ui/src/hosts/apis/index.js b/ui/src/hosts/apis/index.js
index 20b653401..0fae57bfc 100644
--- a/ui/src/hosts/apis/index.js
+++ b/ui/src/hosts/apis/index.js
@@ -2,11 +2,11 @@
import {proxy} from 'utils/queryUrlGenerator';
import AJAX from 'utils/ajax';
import _ from 'lodash';
-export function getCpuAndLoadForHosts(proxyLink) {
+export function getCpuAndLoadForHosts(proxyLink, telegrafDB) {
  return proxy({
    source: proxyLink,
-    query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "telegraf".."system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
-    db: 'telegraf',
+    query: `select mean(usage_user) from cpu where cpu = 'cpu-total' and time > now() - 10m group by host; select mean("load1") from "system" where time > now() - 10m group by host; select mean("Percent_Processor_Time") from win_cpu where time > now() - 10m group by host; select mean("Processor_Queue_Length") from win_system where time > now() - 10s group by host`,
+    db: telegrafDB,
  }).then((resp) => {
    const hosts = {};
    const precision = 100;
@@ -51,13 +51,13 @@
export function getMappings() {
  });
}
-export function getAppsForHosts(proxyLink, hosts, appMappings) {
+export function getAppsForHosts(proxyLink, hosts, appMappings, telegrafDB) {
  const measurements = appMappings.map((m) => `^${m.measurement}$`).join('|');
  const measurementsToApps = _.zipObject(appMappings.map(m => m.measurement), appMappings.map(m => m.name));
  return proxy({
    source: proxyLink,
    query: `show series from /${measurements}/`,
-    db: 'telegraf',
+    db: telegrafDB,
  }).then((resp) => {
    const newHosts = Object.assign({}, hosts);
    const allSeries = _.get(resp, ['data', 'results', '0', 'series', '0', 'values'], []);
@@ -81,3 +81,28 @@
    return newHosts;
  });
}
+
+export function getMeasurementsForHost(source, host) {
+  return proxy({
+    source: source.links.proxy,
+    query: `SHOW MEASUREMENTS WHERE "host" = '${host}'`,
+    db: source.telegraf,
+  }).then(({data}) => {
+    if (_isEmpty(data) || _hasError(data)) {
+      return [];
+    }
+
+    const series = data.results[0].series[0];
+    return series.values.map((measurement) => {
+      return measurement[0];
+    });
+  });
+}
+
+function _isEmpty(resp) {
+  return !resp.results[0].series;
+}
+
+function _hasError(resp) {
+  return !!resp.results[0].error;
+}
diff --git a/ui/src/hosts/containers/HostPage.js b/ui/src/hosts/containers/HostPage.js
index 6c34041b7..af6cd8fc1 100644
--- a/ui/src/hosts/containers/HostPage.js
+++ b/ui/src/hosts/containers/HostPage.js
@@ -2,7 +2,7 @@
import React, {PropTypes} from 'react';
import LayoutRenderer from 'shared/components/LayoutRenderer';
import TimeRangeDropdown from
'../../shared/components/TimeRangeDropdown'; import timeRanges from 'hson!../../shared/data/timeRanges.hson'; -import {getMappings, getAppsForHosts} from '../apis'; +import {getMappings, getAppsForHosts, getMeasurementsForHost} from 'src/hosts/apis'; import {fetchLayouts} from 'shared/apis'; export const HostPage = React.createClass({ @@ -11,6 +11,7 @@ export const HostPage = React.createClass({ links: PropTypes.shape({ proxy: PropTypes.string.isRequired, }).isRequired, + telegraf: PropTypes.string.isRequired, }), params: PropTypes.shape({ hostID: PropTypes.string.isRequired, @@ -32,21 +33,25 @@ export const HostPage = React.createClass({ }, componentDidMount() { - const hosts = {[this.props.params.hostID]: {name: this.props.params.hostID}}; + const {source, params} = this.props; + const hosts = {[params.hostID]: {name: params.hostID}}; // fetching layouts and mappings can be done at the same time fetchLayouts().then(({data: {layouts}}) => { getMappings().then(({data: {mappings}}) => { - getAppsForHosts(this.props.source.links.proxy, hosts, mappings).then((newHosts) => { - const host = newHosts[this.props.params.hostID]; - const filteredLayouts = layouts.filter((layout) => { - const focusedApp = this.props.location.query.app; - if (focusedApp) { - return layout.app === focusedApp; - } - return host.apps && host.apps.includes(layout.app); + getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => { + getMeasurementsForHost(source, params.hostID).then((measurements) => { + const host = newHosts[this.props.params.hostID]; + const filteredLayouts = layouts.filter((layout) => { + const focusedApp = this.props.location.query.app; + if (focusedApp) { + return layout.app === focusedApp; + } + + return host.apps && host.apps.includes(layout.app) && measurements.includes(layout.measurement); + }); + this.setState({layouts: filteredLayouts}); }); - this.setState({layouts: filteredLayouts}); }); }); }); @@ -60,7 +65,7 @@ export const HostPage = React.createClass({ renderLayouts(layouts) { const autoRefreshMs = 15000; const {timeRange} = this.state; - const source = this.props.source.links.proxy; + const {source} = this.props; let layoutCells = []; layouts.forEach((layout) => { @@ -70,7 +75,7 @@ export const HostPage = React.createClass({ layoutCells.forEach((cell, i) => { cell.queries.forEach((q) => { q.text = q.query; - q.database = q.db; + q.database = source.telegraf; }); cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers cell.y = 0; @@ -81,7 +86,7 @@ export const HostPage = React.createClass({ timeRange={timeRange} cells={layoutCells} autoRefreshMs={autoRefreshMs} - source={source} + source={source.links.proxy} host={this.props.params.hostID} /> ); diff --git a/ui/src/hosts/containers/HostsPage.js b/ui/src/hosts/containers/HostsPage.js index 84cbff622..91170fe3e 100644 --- a/ui/src/hosts/containers/HostsPage.js +++ b/ui/src/hosts/containers/HostsPage.js @@ -12,6 +12,7 @@ export const HostsPage = React.createClass({ links: PropTypes.shape({ proxy: PropTypes.string.isRequired, }).isRequired, + telegraf: PropTypes.string.isRequired, }), addFlashMessage: PropTypes.func, }, @@ -25,11 +26,11 @@ export const HostsPage = React.createClass({ componentDidMount() { const {source, addFlashMessage} = this.props; Promise.all([ - getCpuAndLoadForHosts(source.links.proxy), + getCpuAndLoadForHosts(source.links.proxy, source.telegraf), getMappings(), ]).then(([hosts, {data: {mappings}}]) => { this.setState({hosts}); - getAppsForHosts(source.links.proxy, hosts, 
mappings).then((newHosts) => { + getAppsForHosts(source.links.proxy, hosts, mappings, source.telegraf).then((newHosts) => { this.setState({hosts: newHosts}); }).catch(() => { addFlashMessage({type: 'error', text: 'Unable to get apps for hosts'}); diff --git a/ui/src/index.js b/ui/src/index.js index 4863b6146..734a6e7dd 100644 --- a/ui/src/index.js +++ b/ui/src/index.js @@ -1,25 +1,24 @@ -import React, {PropTypes} from 'react'; +import React from 'react'; import {render} from 'react-dom'; import {Provider} from 'react-redux'; -import {Router, Route, browserHistory} from 'react-router'; +import {Router, Route, browserHistory, Redirect} from 'react-router'; import App from 'src/App'; import AlertsApp from 'src/alerts'; import CheckSources from 'src/CheckSources'; import {HostsPage, HostPage} from 'src/hosts'; import {KubernetesPage} from 'src/kubernetes'; +import {Login} from 'src/auth'; import {KapacitorPage, KapacitorRulePage, KapacitorRulesPage, KapacitorTasksPage} from 'src/kapacitor'; import DataExplorer from 'src/chronograf'; import {CreateSource, SourceForm, ManageSources} from 'src/sources'; import NotFound from 'src/shared/components/NotFound'; -import NoClusterError from 'src/shared/components/NoClusterError'; import configureStore from 'src/store/configureStore'; -import {getSources} from 'shared/apis'; +import {getMe, getSources} from 'shared/apis'; +import {receiveMe} from 'shared/actions/me'; import 'src/style/enterprise_style/application.scss'; -const {number, shape, string, bool} = PropTypes; - const defaultTimeRange = {upper: null, lower: 'now() - 15m'}; const lsTimeRange = window.localStorage.getItem('timeRange'); const parsedTimeRange = JSON.parse(lsTimeRange) || {}; @@ -28,38 +27,15 @@ const timeRange = Object.assign(defaultTimeRange, parsedTimeRange); const store = configureStore({timeRange}); const rootNode = document.getElementById('react-root'); -const HTTP_SERVER_ERROR = 500; - const Root = React.createClass({ getInitialState() { return { - me: { - id: 1, - name: 'Chronograf', - email: 'foo@example.com', - admin: true, - }, - isFetching: false, - hasReadPermission: false, - clusterStatus: null, + loggedIn: null, }; }, - - childContextTypes: { - me: shape({ - id: number.isRequired, - name: string.isRequired, - email: string.isRequired, - admin: bool.isRequired, - }), + componentDidMount() { + this.checkAuth(); }, - - getChildContext() { - return { - me: this.state.me, - }; - }, - activeSource(sources) { const defaultSource = sources.find((s) => s.default); if (defaultSource && defaultSource.id) { @@ -68,29 +44,53 @@ const Root = React.createClass({ return sources[0]; }, - redirectToHosts(_, replace, callback) { + redirectFromRoot(_, replace, callback) { getSources().then(({data: {sources}}) => { if (sources && sources.length) { const path = `/sources/${this.activeSource(sources).id}/hosts`; replace(path); } callback(); - }).catch(callback); + }); + }, + + checkAuth() { + if (store.getState().me.links) { + return this.setState({loggedIn: true}); + } + getMe().then(({data: me}) => { + store.dispatch(receiveMe(me)); + this.setState({loggedIn: true}); + }).catch((err) => { + const AUTH_DISABLED = 418; + if (err.response.status === AUTH_DISABLED) { + return this.setState({loggedIn: true}); + // Could store a boolean indicating auth is not set up + } + + this.setState({loggedIn: false}); + }); }, render() { - if (this.state.isFetching) { - return null; + if (this.state.loggedIn === null) { + return
; } - - if (this.state.clusterStatus === HTTP_SERVER_ERROR) { - return ; + if (this.state.loggedIn === false) { + return ( + + + + + + + ); } - return ( - + + diff --git a/ui/src/kapacitor/actions/view/index.js b/ui/src/kapacitor/actions/view/index.js index 64d28927b..250757d98 100644 --- a/ui/src/kapacitor/actions/view/index.js +++ b/ui/src/kapacitor/actions/view/index.js @@ -43,16 +43,14 @@ export function loadDefaultRule() { }; } -export function fetchRules(source) { +export function fetchRules(kapacitor) { return (dispatch) => { - getKapacitor(source).then((kapacitor) => { - getRules(kapacitor).then(({data: {rules}}) => { - dispatch({ - type: 'LOAD_RULES', - payload: { - rules, - }, - }); + getRules(kapacitor).then(({data: {rules}}) => { + dispatch({ + type: 'LOAD_RULES', + payload: { + rules, + }, }); }); }; diff --git a/ui/src/kapacitor/containers/KapacitorRulesPage.js b/ui/src/kapacitor/containers/KapacitorRulesPage.js index 59dc70142..ca92ddc3a 100644 --- a/ui/src/kapacitor/containers/KapacitorRulesPage.js +++ b/ui/src/kapacitor/containers/KapacitorRulesPage.js @@ -2,11 +2,14 @@ import React, {PropTypes} from 'react'; import {connect} from 'react-redux'; import {bindActionCreators} from 'redux'; import {Link} from 'react-router'; -import * as kapacitorActionCreators from 'src/kapacitor/actions/view'; +import {getKapacitor} from 'src/shared/apis'; +import * as kapacitorActionCreators from '../actions/view'; +import NoKapacitorError from '../../shared/components/NoKapacitorError'; export const KapacitorRulesPage = React.createClass({ propTypes: { source: PropTypes.shape({ + id: PropTypes.string.isRequired, links: PropTypes.shape({ proxy: PropTypes.string.isRequired, self: PropTypes.string.isRequired, @@ -26,8 +29,20 @@ export const KapacitorRulesPage = React.createClass({ addFlashMessage: PropTypes.func, }, + getInitialState() { + return { + hasKapacitor: false, + loading: true, + }; + }, + componentDidMount() { - this.props.actions.fetchRules(this.props.source); + getKapacitor(this.props.source).then((kapacitor) => { + if (kapacitor) { + this.props.actions.fetchRules(kapacitor); + } + this.setState({loading: false, hasKapacitor: !!kapacitor}); + }); }, handleDeleteRule(rule) { @@ -35,9 +50,45 @@ export const KapacitorRulesPage = React.createClass({ actions.deleteRule(rule); }, - render() { + renderSubComponent() { const {source} = this.props; + const {hasKapacitor, loading} = this.state; + let component; + if (loading) { + component = (

Loading...

); + } else if (hasKapacitor) { + component = ( +
+
+

Alert Rules

+ Create New Rule +
+
+ + + + + + + + + + + + {this.renderAlertsTableRows()} + +
NameTriggerMessageAlerts
+
+
+ ); + } else { + component = ; + } + return component; + }, + + render() { return (
@@ -49,28 +100,7 @@ export const KapacitorRulesPage = React.createClass({
-
-
-

Alert Rules

- Create New Rule -
-
- - - - - - - - - - - - {this.renderAlertsTableRows()} - -
NameTriggerMessageAlerts
-
-
+ {this.renderSubComponent()}
diff --git a/ui/src/kubernetes/components/KubernetesDashboard.js b/ui/src/kubernetes/components/KubernetesDashboard.js index b785ef3ac..813f8c477 100644 --- a/ui/src/kubernetes/components/KubernetesDashboard.js +++ b/ui/src/kubernetes/components/KubernetesDashboard.js @@ -9,6 +9,7 @@ export const KubernetesPage = React.createClass({ links: PropTypes.shape({ proxy: PropTypes.string.isRequired, }).isRequired, + telegraf: PropTypes.string.isRequired, }), layouts: PropTypes.arrayOf(PropTypes.shape().isRequired).isRequired, }, @@ -23,7 +24,7 @@ export const KubernetesPage = React.createClass({ renderLayouts(layouts) { const autoRefreshMs = 15000; const {timeRange} = this.state; - const source = this.props.source.links.proxy; + const {source} = this.props; let layoutCells = []; layouts.forEach((layout) => { @@ -33,7 +34,7 @@ export const KubernetesPage = React.createClass({ layoutCells.forEach((cell, i) => { cell.queries.forEach((q) => { q.text = q.query; - q.database = q.db; + q.database = source.telegraf; }); cell.x = (i * 4 % 12); // eslint-disable-line no-magic-numbers cell.y = 0; @@ -44,7 +45,7 @@ export const KubernetesPage = React.createClass({ timeRange={timeRange} cells={layoutCells} autoRefreshMs={autoRefreshMs} - source={source} + source={source.links.proxy} /> ); }, @@ -66,12 +67,12 @@ export const KubernetesPage = React.createClass({ return (
-
-
-
+
+
+

Kubernetes Dashboard

-
+

Range:

diff --git a/ui/src/shared/actions/me.js b/ui/src/shared/actions/me.js new file mode 100644 index 000000000..9f4b26057 --- /dev/null +++ b/ui/src/shared/actions/me.js @@ -0,0 +1,14 @@ +export function receiveMe(me) { + return { + type: 'ME_RECEIVED', + payload: { + me, + }, + }; +} + +export function logout() { + return { + type: 'LOGOUT', + }; +} diff --git a/ui/src/shared/apis/index.js b/ui/src/shared/apis/index.js index b9f13f0dc..fee75ba61 100644 --- a/ui/src/shared/apis/index.js +++ b/ui/src/shared/apis/index.js @@ -7,6 +7,13 @@ export function fetchLayouts() { }); } +export function getMe() { + return AJAX({ + url: `/chronograf/v1/me`, + method: 'GET', + }); +} + export function getSources() { return AJAX({ url: '/chronograf/v1/sources', diff --git a/ui/src/shared/components/AddClusterAccounts.js b/ui/src/shared/components/AddClusterAccounts.js deleted file mode 100644 index 8dea74cc2..000000000 --- a/ui/src/shared/components/AddClusterAccounts.js +++ /dev/null @@ -1,79 +0,0 @@ -import React, {PropTypes} from 'react'; - -const {arrayOf, number, shape, func, string} = PropTypes; - -const AddClusterAccounts = React.createClass({ - propTypes: { - clusters: arrayOf(shape({ - id: number.isRequired, - cluster_users: arrayOf(shape({ - name: string.isRequired, - })), - dipslay_name: string, - cluster_id: string.isRequired, - })).isRequired, - onSelectClusterAccount: func.isRequired, - headerText: string, - }, - - getDefaultProps() { - return { - headerText: 'Pair With Cluster Accounts', - }; - }, - - handleSelectClusterAccount(e, clusterID) { - this.props.onSelectClusterAccount({ - clusterID, - accountName: e.target.value, - }); - }, - - render() { - return ( -
- { - this.props.clusters.map((cluster, i) => { - return ( -
-
- {i === 0 ? : null} -
- {cluster.display_name || cluster.cluster_id} -
-
-
- {i === 0 ? : null} - {this.renderClusterUsers(cluster)} -
-
- ); - }) - } -
- ); - }, - - renderClusterUsers(cluster) { - if (!cluster.cluster_users) { - return ( - - ); - } - - return ( - - ); - }, -}); - -export default AddClusterAccounts; diff --git a/ui/src/shared/components/AddPermissionModal.js b/ui/src/shared/components/AddPermissionModal.js deleted file mode 100644 index 911fd1032..000000000 --- a/ui/src/shared/components/AddPermissionModal.js +++ /dev/null @@ -1,124 +0,0 @@ -import React, {PropTypes} from 'react'; - -const CLUSTER_WIDE_PERMISSIONS = ["CreateDatabase", "AddRemoveNode", "ManageShard", "DropDatabase", "CopyShard", "Rebalance"]; - -const AddPermissionModal = React.createClass({ - propTypes: { - activeCluster: PropTypes.string.isRequired, - permissions: PropTypes.arrayOf(PropTypes.shape({ - name: PropTypes.string.isRequired, - displayName: PropTypes.string.isRequired, - description: PropTypes.string.isRequired, - })), - databases: PropTypes.arrayOf(PropTypes.string.isRequired).isRequired, - onAddPermission: PropTypes.func.isRequired, - }, - - getInitialState() { - return { - selectedPermission: null, - selectedDatabase: '', - }; - }, - - handlePermissionClick(permission) { - this.setState({ - selectedPermission: permission, - selectedDatabase: '', - }); - }, - - handleDatabaseChange(e) { - this.setState({selectedDatabase: e.target.value}); - }, - - handleSubmit(e) { - e.preventDefault(); - this.props.onAddPermission({ - name: this.state.selectedPermission, - resources: [this.state.selectedDatabase], - }); - $('#addPermissionModal').modal('hide'); // eslint-disable-line no-undef - }, - - render() { - const {permissions} = this.props; - - return ( - - ); - }, - - renderFooter() { - return ( -
- - -
- ); - }, - - renderOptions() { - return ( -
- {this.state.selectedPermission ? this.renderDatabases() : null} -
- ); - }, - - renderDatabases() { - const isClusterWide = CLUSTER_WIDE_PERMISSIONS.includes(this.state.selectedPermission); - if (!this.props.databases.length || isClusterWide) { - return null; - } - - return ( -
-
-
- - -
-
-
- ); - }, -}); - -export default AddPermissionModal; diff --git a/ui/src/shared/components/AutoRefresh.js b/ui/src/shared/components/AutoRefresh.js index 954e42a5a..487b6c014 100644 --- a/ui/src/shared/components/AutoRefresh.js +++ b/ui/src/shared/components/AutoRefresh.js @@ -60,7 +60,7 @@ export default function AutoRefresh(ComposedComponent) { const newSeries = []; queries.forEach(({host, database, rp, text}) => { _fetchTimeSeries(host, database, rp, text).then((resp) => { - newSeries.push({identifier: host, response: resp.data}); + newSeries.push({response: resp.data}); count += 1; if (count === queries.length) { this.setState({ diff --git a/ui/src/shared/components/ClusterError.js b/ui/src/shared/components/ClusterError.js deleted file mode 100644 index f3dd4c923..000000000 --- a/ui/src/shared/components/ClusterError.js +++ /dev/null @@ -1,24 +0,0 @@ -import React from 'react'; - -const {node} = React.PropTypes; -const ClusterError = React.createClass({ - propTypes: { - children: node.isRequired, - }, - - render() { - return ( -
diff --git a/ui/src/shared/components/ClusterError.js b/ui/src/shared/components/ClusterError.js
deleted file mode 100644
index f3dd4c923..000000000
--- a/ui/src/shared/components/ClusterError.js
+++ /dev/null
@@ -1,24 +0,0 @@
-import React from 'react';
-
-const {node} = React.PropTypes;
-const ClusterError = React.createClass({
-  propTypes: {
-    children: node.isRequired,
-  },
-
-  render() {
-    return (
-
-
-
- {this.props.children} -
-
-
-
-    );
-  },
-});
-
-export default ClusterError;
diff --git a/ui/src/shared/components/Dygraph.js b/ui/src/shared/components/Dygraph.js
index 2ca358d20..3aec7750b 100644
--- a/ui/src/shared/components/Dygraph.js
+++ b/ui/src/shared/components/Dygraph.js
@@ -131,6 +131,7 @@ export default React.createClass({
     const timeSeries = this.getTimeSeries();
     const {fields, yRange} = this.props;
+
     dygraph.updateOptions({
       labels: fields,
       file: timeSeries,
diff --git a/ui/src/shared/components/InsufficientPermissions.js b/ui/src/shared/components/InsufficientPermissions.js
deleted file mode 100644
index 7626a77f5..000000000
--- a/ui/src/shared/components/InsufficientPermissions.js
+++ /dev/null
@@ -1,21 +0,0 @@
-import React from 'react';
-import ClusterError from './ClusterError';
-
-const InsufficientPermissions = React.createClass({
-  render() {
-    return (
-

- {`Your account has insufficient permissions`} -

-
-
-

Talk to your admin to get additional permissions for access

-
-
-    );
-  },
-});
-
-export default InsufficientPermissions;
diff --git a/ui/src/shared/components/LayoutRenderer.js b/ui/src/shared/components/LayoutRenderer.js
index 3ad33c9ff..a17d38861 100644
--- a/ui/src/shared/components/LayoutRenderer.js
+++ b/ui/src/shared/components/LayoutRenderer.js
@@ -101,7 +101,7 @@ export const LayoutRenderer = React.createClass({
   render() {
     const layoutMargin = 4;
     return (
-
+
       {this.generateGraphs()}
     );
diff --git a/ui/src/shared/components/NoClusterError.js b/ui/src/shared/components/NoClusterError.js
deleted file mode 100644
index b55ecd33c..000000000
--- a/ui/src/shared/components/NoClusterError.js
+++ /dev/null
@@ -1,35 +0,0 @@
-import React from 'react';
-import errorCopy from 'hson!shared/copy/errors.hson';
-
-const NoClusterError = React.createClass({
-  render() {
-    return (
-
-
-
-
-
-

- {errorCopy.noCluster.head} -

-
-
-

How to resolve:

-

- {errorCopy.noCluster.body} -

-
- -
-
-
-
-
-
-
-    );
-  },
-});
-
-export default NoClusterError;
diff --git a/ui/src/shared/components/NoClusterLinksError.js b/ui/src/shared/components/NoClusterLinksError.js
deleted file mode 100644
index 72b92dea6..000000000
--- a/ui/src/shared/components/NoClusterLinksError.js
+++ /dev/null
@@ -1,27 +0,0 @@
-import React from 'react';
-
-const NoClusterLinksError = React.createClass({
-  render() {
-    return (
-
-
-
-
-

- This user is not associated with any cluster accounts! -

-
-
-

Many features in Chronograf require your user to be associated with a cluster account.

-

Ask an administrator to associate your user with a cluster account.

-
-
-
-
-
-    );
-  },
-});
-
-export default NoClusterLinksError;
diff --git a/ui/src/shared/components/NoKapacitorError.js b/ui/src/shared/components/NoKapacitorError.js
new file mode 100644
index 000000000..a82451dcc
--- /dev/null
+++ b/ui/src/shared/components/NoKapacitorError.js
@@ -0,0 +1,22 @@
+import React, {PropTypes} from 'react';
+import {Link} from 'react-router';
+
+const NoKapacitorError = React.createClass({
+  propTypes: {
+    source: PropTypes.shape({
+      id: PropTypes.string.isRequired,
+    }).isRequired,
+  },
+
+  render() {
+    const path = `/sources/${this.props.source.id}/kapacitor-config`;
+    return (
+
+      The current source does not have an associated Kapacitor instance, please configure one.
+      Add Kapacitor
+
+ ); + }, +}); + +export default NoKapacitorError; diff --git a/ui/src/shared/components/PermissionsTable.js b/ui/src/shared/components/PermissionsTable.js deleted file mode 100644 index 105ed42ee..000000000 --- a/ui/src/shared/components/PermissionsTable.js +++ /dev/null @@ -1,76 +0,0 @@ -import React, {PropTypes} from 'react'; - -const {arrayOf, shape, string} = PropTypes; - -const PermissionsTable = React.createClass({ - propTypes: { - permissions: PropTypes.arrayOf(shape({ - name: string.isRequired, - displayName: string.isRequired, - description: string.isRequired, - resources: arrayOf(string.isRequired).isRequired, - })).isRequired, - showAddResource: PropTypes.bool, - onRemovePermission: PropTypes.func, - }, - - getDefaultProps() { - return { - permissions: [], - showAddResource: false, - }; - }, - - handleAddResourceClick() { - // TODO - }, - - handleRemovePermission(permission) { - this.props.onRemovePermission(permission); - }, - - render() { - if (!this.props.permissions.length) { - return ( -
diff --git a/ui/src/shared/components/PermissionsTable.js b/ui/src/shared/components/PermissionsTable.js
deleted file mode 100644
index 105ed42ee..000000000
--- a/ui/src/shared/components/PermissionsTable.js
+++ /dev/null
@@ -1,76 +0,0 @@
-import React, {PropTypes} from 'react';
-
-const {arrayOf, shape, string} = PropTypes;
-
-const PermissionsTable = React.createClass({
-  propTypes: {
-    permissions: PropTypes.arrayOf(shape({
-      name: string.isRequired,
-      displayName: string.isRequired,
-      description: string.isRequired,
-      resources: arrayOf(string.isRequired).isRequired,
-    })).isRequired,
-    showAddResource: PropTypes.bool,
-    onRemovePermission: PropTypes.func,
-  },
-
-  getDefaultProps() {
-    return {
-      permissions: [],
-      showAddResource: false,
-    };
-  },
-
-  handleAddResourceClick() {
-    // TODO
-  },
-
-  handleRemovePermission(permission) {
-    this.props.onRemovePermission(permission);
-  },
-
-  render() {
-    if (!this.props.permissions.length) {
-      return (
-
-

This Role has no Permissions

-
- ); - } - - return ( -
- - - {this.props.permissions.map((p) => ( - - - - {this.props.onRemovePermission ? ( - - ) : null} - - ))} - -
{p.displayName} - {p.resources.map((resource, i) =>
{resource === '' ? 'All Databases' : resource}
)} - {this.props.showAddResource ? ( -
- -
- ) : null} -
- -
-
-    );
-  },
-});
-
-export default PermissionsTable;
diff --git a/ui/src/shared/components/RolePanels.js b/ui/src/shared/components/RolePanels.js
deleted file mode 100644
index c8ebfeedc..000000000
--- a/ui/src/shared/components/RolePanels.js
+++ /dev/null
@@ -1,86 +0,0 @@
-import React, {PropTypes} from 'react';
-import {Link} from 'react-router';
-import PermissionsTable from 'src/shared/components/PermissionsTable';
-
-const {arrayOf, bool, func, shape, string} = PropTypes;
-
-const RolePanels = React.createClass({
-  propTypes: {
-    roles: arrayOf(shape({
-      name: string.isRequired,
-      users: arrayOf(string.isRequired).isRequired,
-      permissions: arrayOf(shape({
-        name: string.isRequired,
-        displayName: string.isRequired,
-        description: string.isRequired,
-        resources: arrayOf(string.isRequired).isRequired,
-      })).isRequired,
-    })).isRequired,
-    showUserCount: bool,
-    onRemoveAccountFromRole: func,
-  },
-
-  getDefaultProps() {
-    return {
-      showUserCount: false,
-    };
-  },
-
-  render() {
-    const {roles} = this.props;
-
-    if (!roles.length) {
-      return (
-
-
- -

This user has no roles

-
-
-
- ); - } - - return ( -
- {roles.map((role) => { - const id = role.name.replace(/[^\w]/gi, ''); - return ( -
- -
- -
-
- ); - })} -
-    );
-  },
-});
-
-export default RolePanels;
diff --git a/ui/src/shared/components/UsersTable.js b/ui/src/shared/components/UsersTable.js
deleted file mode 100644
index 656478e4b..000000000
--- a/ui/src/shared/components/UsersTable.js
+++ /dev/null
@@ -1,95 +0,0 @@
-import React, {PropTypes} from 'react';
-import {Link} from 'react-router';
-import classNames from 'classnames';
-
-const {func, shape, arrayOf, string} = PropTypes;
-const UsersTable = React.createClass({
-  propTypes: {
-    users: arrayOf(shape({}).isRequired).isRequired,
-    activeCluster: string.isRequired,
-    onUserToDelete: func.isRequired,
-    me: shape({}).isRequired,
-    deleteText: string,
-  },
-
-  getDefaultProps() {
-    return {
-      deleteText: 'Delete',
-    };
-  },
-
-  handleSelectUserToDelete(user) {
-    this.props.onUserToDelete(user);
-  },
-
-  render() {
-    const {users, activeCluster, me} = this.props;
-
-    if (!users.length) {
-      return (
- -

No users

-
- ); - } - - return ( - - - - - - - - - - { - users.map((user) => { - const isMe = me.id === user.id; - return ( - - - - - - - - ); - }) - } - -
NameAdminEmail
- - {user.name} - {isMe ? (You) : null} - - {this.renderAdminIcon(user.admin)}{user.email} - {this.renderDeleteButton(user)} -
-    );
-  },
-
-  renderAdminIcon(isAdmin) {
-    return ;
-  },
-
-  renderDeleteButton(user) {
-    if (this.props.me.id === user.id) {
-      return ;
-    }
-
-    return (
-
-    );
-  },
-});
-
-export default UsersTable;
diff --git a/ui/src/shared/reducers/index.js b/ui/src/shared/reducers/index.js
new file mode 100644
index 000000000..2804e163b
--- /dev/null
+++ b/ui/src/shared/reducers/index.js
@@ -0,0 +1,7 @@
+import me from './me';
+import notifications from './notifications';
+
+export {
+  me,
+  notifications,
+};
diff --git a/ui/src/shared/reducers/me.js b/ui/src/shared/reducers/me.js
new file mode 100644
index 000000000..6fa8c00d3
--- /dev/null
+++ b/ui/src/shared/reducers/me.js
@@ -0,0 +1,17 @@
+function getInitialState() {
+  return {};
+}
+const initialState = getInitialState();
+
+export default function me(state = initialState, action) {
+  switch (action.type) {
+    case 'ME_RECEIVED': {
+      return action.payload.me;
+    }
+    case 'LOGOUT': {
+      return {};
+    }
+  }
+
+  return state;
+}
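The `me` reducer is deliberately tiny: `ME_RECEIVED` replaces the state wholesale and `LOGOUT` resets it to the empty object. A minimal sketch of it in action, with the store wiring assumed for illustration:

```js
import {createStore, combineReducers} from 'redux';
import me from 'src/shared/reducers/me';

const store = createStore(combineReducers({me}));

store.dispatch({type: 'ME_RECEIVED', payload: {me: {email: 'user@example.com'}}});
console.log(store.getState().me.email); // "user@example.com"

store.dispatch({type: 'LOGOUT'});
console.log(store.getState().me); // {}
```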
diff --git a/ui/src/side_nav/components/SideNav.js b/ui/src/side_nav/components/SideNav.js
index 7720c1e5d..5b48ab8d7 100644
--- a/ui/src/side_nav/components/SideNav.js
+++ b/ui/src/side_nav/components/SideNav.js
@@ -1,20 +1,25 @@
 import React, {PropTypes} from 'react';
 import {NavBar, NavBlock, NavHeader, NavListItem} from 'src/side_nav/components/NavItems';
 
-const {string} = PropTypes;
+const {string, shape} = PropTypes;
 const SideNav = React.createClass({
   propTypes: {
     location: string.isRequired,
     sourceID: string.isRequired,
     explorationID: string,
+    me: shape({
+      email: string.isRequired,
+    }),
   },
 
   render() {
-    const {location, sourceID, explorationID} = this.props;
+    const {me, location, sourceID, explorationID} = this.props;
     const sourcePrefix = `/sources/${sourceID}`;
     const explorationSuffix = explorationID ? `/${explorationID}` : '';
     const dataExplorerLink = `${sourcePrefix}/chronograf/data-explorer${explorationSuffix}`;
+    const loggedIn = !!(me && me.email);
+
     return (
@@ -39,6 +44,11 @@ const SideNav = React.createClass({
         InfluxDB
         Kapacitor
+        {loggedIn ? (
+
+            Logout
+
+        ) : null}
       );
   },
diff --git a/ui/src/side_nav/containers/SideNavApp.js b/ui/src/side_nav/containers/SideNavApp.js
index 3742556c5..f6ecb4f14 100644
--- a/ui/src/side_nav/containers/SideNavApp.js
+++ b/ui/src/side_nav/containers/SideNavApp.js
@@ -1,41 +1,38 @@
 import React, {PropTypes} from 'react';
+import {connect} from 'react-redux';
 import SideNav from '../components/SideNav';
 
-const {func, string} = PropTypes;
+const {func, string, shape} = PropTypes;
 const SideNavApp = React.createClass({
   propTypes: {
     currentLocation: string.isRequired,
     addFlashMessage: func.isRequired,
     sourceID: string.isRequired,
     explorationID: string,
-  },
-
-  contextTypes: {
-    canViewChronograf: PropTypes.bool,
-  },
-
-  getInitialState() {
-    return {
-      clusters: [],
-      clusterToUpdate: '',
-    };
+    me: shape({
+      email: string.isRequired,
+    }),
   },
 
   render() {
-    const {currentLocation, sourceID, explorationID} = this.props;
-    const {canViewChronograf} = this.context;
+    const {me, currentLocation, sourceID, explorationID} = this.props;
 
     return (
     );
   },
 });
 
-export default SideNavApp;
+function mapStateToProps(state) {
+  return {
+    me: state.me,
+  };
+}
+
+export default connect(mapStateToProps)(SideNavApp);
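`SideNavApp` now gets `me` from the store instead of React context, so login state follows the ordinary `connect` data flow and the `loggedIn` check is a plain prop derivation. The pattern in isolation, with a generic placeholder component:

```js
// Generic sketch of the container pattern used above (not in this diff).
import React from 'react';
import {connect} from 'react-redux';

const Greeting = ({me}) => (
  <span>{me && me.email ? `Hi, ${me.email}` : 'Not logged in'}</span>
);

function mapStateToProps(state) {
  return {me: state.me}; // re-renders whenever the shared `me` state changes
}

export default connect(mapStateToProps)(Greeting);
```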
diff --git a/ui/src/sources/containers/CreateSource.js b/ui/src/sources/containers/CreateSource.js
index 65451b8b7..5223e0aec 100644
--- a/ui/src/sources/containers/CreateSource.js
+++ b/ui/src/sources/containers/CreateSource.js
@@ -22,6 +22,7 @@ export const CreateSource = React.createClass({
       username: this.sourceUser.value,
       password: this.sourcePassword.value,
       isDefault: true,
+      telegraf: this.sourceTelegraf.value,
     };
     createSource(source).then(({data: sourceFromServer}) => {
       this.redirectToApp(sourceFromServer);
@@ -71,7 +72,10 @@
         <input ref={(r) => this.sourcePassword = r} className="form-control" id="password" type="password">
-
+
+
+        <input ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" type="text" value="telegraf">
+
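Both source forms now submit a `telegraf` field alongside the connection details, defaulting to `telegraf`; this is what lets layouts query a Telegraf database in a non-default location. Roughly the payload shape, with illustrative values:

```js
// Assumed shape of the source object POSTed to /chronograf/v1/sources:
const source = {
  url: 'http://localhost:8086', // illustrative
  name: 'influx-prod',          // illustrative
  username: 'admin',
  password: 'secret',
  isDefault: true,
  telegraf: 'telegraf',         // new: the database Telegraf writes to
};
```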
diff --git a/ui/src/sources/containers/SourceForm.js b/ui/src/sources/containers/SourceForm.js
index 1a2bd91b9..3731bfafa 100644
--- a/ui/src/sources/containers/SourceForm.js
+++ b/ui/src/sources/containers/SourceForm.js
@@ -44,6 +44,7 @@
       username: this.sourceUsername.value,
       password: this.sourcePassword.value,
       'default': this.sourceDefault.checked,
+      telegraf: this.sourceTelegraf.value,
     });
     if (this.state.editMode) {
       updateSource(newSource).then(() => {
@@ -117,6 +118,10 @@
         <input ref={(r) => this.sourcePassword = r} className="form-control" id="password" onChange={this.onInputChange} value={source.password || ''}>
+
+
+        <input ref={(r) => this.sourceTelegraf = r} className="form-control" id="telegraf" onChange={this.onInputChange} value={source.telegraf || 'telegraf'}>
+
         this.sourceDefault = r} />
diff --git a/ui/src/store/configureStore.js b/ui/src/store/configureStore.js
index 3544e94c1..0a7d35638 100644
--- a/ui/src/store/configureStore.js
+++ b/ui/src/store/configureStore.js
@@ -3,11 +3,15 @@ import {combineReducers} from 'redux';
 import thunkMiddleware from 'redux-thunk';
 import makeQueryExecuter from 'src/shared/middleware/queryExecuter';
 import * as chronografReducers from 'src/chronograf/reducers';
+import * as sharedReducers from 'src/shared/reducers';
 import rulesReducer from 'src/kapacitor/reducers/rules';
-import notifications from 'src/shared/reducers/notifications';
 import persistStateEnhancer from './persistStateEnhancer';
 
-const rootReducer = combineReducers({notifications, ...chronografReducers, rules: rulesReducer});
+const rootReducer = combineReducers({
+  ...sharedReducers,
+  ...chronografReducers,
+  rules: rulesReducer,
+});
 
 export default function configureStore(initialState) {
   const createPersistentStore = compose(
diff --git a/ui/src/utils/timeSeriesToDygraph.js b/ui/src/utils/timeSeriesToDygraph.js
index 73e9bad1c..c70e96d8e 100644
--- a/ui/src/utils/timeSeriesToDygraph.js
+++ b/ui/src/utils/timeSeriesToDygraph.js
@@ -26,7 +26,7 @@ export default function timeSeriesToDygraph(raw = []) {
   */
   const dateToFieldValue = {};
 
-  raw.forEach(({identifier = '', response}) => {
+  raw.forEach(({response}) => {
     // If a response is an empty result set or a query returned an error
     // from InfluxDB, don't try and parse.
     if (response.results.length) {
@@ -83,8 +83,7 @@ export default function timeSeriesToDygraph(raw = []) {
     }).sort().join('');
 
     columns.slice(1).forEach((fieldName) => {
-      const identString = identifier ? `(${identifier})` : '';
-      const effectiveFieldName = `${measurementName}.${fieldName}${tags}${identString}`;
+      const effectiveFieldName = `${measurementName}.${fieldName}${tags}`;
 
       // Given a field name, identify which column in the timeSeries result should hold the field's value
       // ex given this timeSeries [Date, 10, 20, 30] field index at 2 would correspond to value 20
@@ -112,8 +111,7 @@ export default function timeSeriesToDygraph(raw = []) {
       }
 
       const fieldName = columns[index];
-      const identString = identifier ? `(${identifier})` : '';
-      const effectiveFieldName = `${measurementName}.${fieldName}${tags}${identString}`;
+      const effectiveFieldName = `${measurementName}.${fieldName}${tags}`;
 
       dateToFieldValue[dateString][effectiveFieldName] = value;
     });
   }
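The net effect of the `identifier` removal shows up in series labels. A worked example of the new `effectiveFieldName`, with illustrative values:

```js
// Illustrative only: how a Dygraph series label is built after this change.
const measurementName = 'cpu';
const fieldName = 'usage_user';
const tags = '[host=server01]';

const effectiveFieldName = `${measurementName}.${fieldName}${tags}`;
console.log(effectiveFieldName); // "cpu.usage_user[host=server01]"
// Before this change, a non-empty identifier appended "(<identifier>)",
// e.g. "cpu.usage_user[host=server01](localhost:8086)".
```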