Update to fix go linter issues

pull/966/head
Chris Goller 2017-03-06 10:11:52 -06:00
parent 4abcb91e12
commit 8dc012aeac
24 changed files with 70 additions and 37 deletions

View File

@@ -11,8 +11,10 @@ import (
 // Ensure AlertsStore implements chronograf.AlertsStore.
 var _ chronograf.AlertRulesStore = &AlertsStore{}
 
+// AlertsBucket is the name of the bucket alert configuration is stored in
 var AlertsBucket = []byte("Alerts")
 
+// AlertsStore represents the bolt implementation of a store for alerts
 type AlertsStore struct {
 	client *Client
 }
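Most of the hunks in this commit add doc comments so that golint's "exported X should have comment" warnings go away. A minimal sketch of that convention, using hypothetical names rather than the chronograf types:

// Package store is a hypothetical example, not part of this commit. Each
// exported identifier gets a comment that starts with the identifier's name,
// which is what golint checks for.
package store

// DefaultBucket is the bucket used when no other bucket is configured.
var DefaultBucket = []byte("Default")

// Store persists items in a single bucket.
type Store struct {
	bucket []byte
}

// NewStore returns a Store that writes to DefaultBucket.
func NewStore() *Store {
	return &Store{bucket: DefaultBucket}
}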

View File

@@ -23,6 +23,7 @@ type Client struct {
 	DashboardsStore *DashboardsStore
 }
 
+// NewClient initializes all stores
 func NewClient() *Client {
 	c := &Client{Now: time.Now}
 	c.SourcesStore = &SourcesStore{client: c}
@@ -79,6 +80,7 @@ func (c *Client) Open() error {
 	return nil
 }
 
+// Close the connection to the bolt database
 func (c *Client) Close() error {
 	if c.db != nil {
 		return c.db.Close()

View File

@@ -12,8 +12,10 @@ import (
 // Ensure DashboardsStore implements chronograf.DashboardsStore.
 var _ chronograf.DashboardsStore = &DashboardsStore{}
 
+// DashboardBucket is the bolt bucket dashboards are stored in
 var DashboardBucket = []byte("Dashoard")
 
+// DashboardsStore is the bolt implementation of storing dashboards
 type DashboardsStore struct {
 	client *Client
 	IDs    chronograf.DashboardID
@@ -81,9 +83,9 @@ func (d *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (c
 }
 
 // Delete the dashboard from DashboardsStore
-func (s *DashboardsStore) Delete(ctx context.Context, d chronograf.Dashboard) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
-		if err := tx.Bucket(DashboardBucket).Delete(itob(int(d.ID))); err != nil {
+func (d *DashboardsStore) Delete(ctx context.Context, dash chronograf.Dashboard) error {
+	if err := d.client.db.Update(func(tx *bolt.Tx) error {
+		if err := tx.Bucket(DashboardBucket).Delete(itob(int(dash.ID))); err != nil {
 			return err
 		}
 		return nil
@@ -95,16 +97,16 @@ func (s *DashboardsStore) Delete(ctx context.Context, d chronograf.Dashboard) er
 }
 
 // Update the dashboard in DashboardsStore
-func (s *DashboardsStore) Update(ctx context.Context, d chronograf.Dashboard) error {
-	if err := s.client.db.Update(func(tx *bolt.Tx) error {
+func (d *DashboardsStore) Update(ctx context.Context, dash chronograf.Dashboard) error {
+	if err := d.client.db.Update(func(tx *bolt.Tx) error {
 		// Get an existing dashboard with the same ID.
 		b := tx.Bucket(DashboardBucket)
-		strID := strconv.Itoa(int(d.ID))
+		strID := strconv.Itoa(int(dash.ID))
 		if v := b.Get([]byte(strID)); v == nil {
 			return chronograf.ErrDashboardNotFound
 		}
 
-		if v, err := internal.MarshalDashboard(d); err != nil {
+		if v, err := internal.MarshalDashboard(dash); err != nil {
 			return err
 		} else if err := b.Put([]byte(strID), v); err != nil {
 			return err

View File

@@ -11,8 +11,10 @@ import (
 // Ensure LayoutStore implements chronograf.LayoutStore.
 var _ chronograf.LayoutStore = &LayoutStore{}
 
+// LayoutBucket is the bolt bucket layouts are stored in
 var LayoutBucket = []byte("Layout")
 
+// LayoutStore is the bolt implementation to store layouts
 type LayoutStore struct {
 	client *Client
 	IDs    chronograf.ID

View File

@@ -11,8 +11,11 @@ import (
 // Ensure ServersStore implements chronograf.ServersStore.
 var _ chronograf.ServersStore = &ServersStore{}
 
+// ServersBucket is the bolt bucket to store lists of servers
 var ServersBucket = []byte("Servers")
 
+// ServersStore is the bolt implementation to store servers in a store.
+// Used store servers that are associated in some way with a source
 type ServersStore struct {
 	client *Client
 }

View File

@@ -11,8 +11,10 @@ import (
 // Ensure SourcesStore implements chronograf.SourcesStore.
 var _ chronograf.SourcesStore = &SourcesStore{}
 
+// SourcesBucket is the bolt bucket used to store source information
 var SourcesBucket = []byte("Sources")
 
+// SourcesStore is a bolt implementation to store time-series source information.
 type SourcesStore struct {
 	client *Client
 }

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/influxdata/chronograf"
 )
 
+// AppExt is the the file extension searched for in the directory for layout files
 const AppExt = ".json"
 
 // Apps are canned JSON layouts. Implements LayoutStore.
@@ -25,6 +26,7 @@ type Apps struct {
 	Logger chronograf.Logger
 }
 
+// NewApps constructs a layout store wrapping a file system directory
 func NewApps(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.LayoutStore {
 	return &Apps{
 		Dir: dir,
@@ -63,14 +65,14 @@ func createLayout(file string, layout chronograf.Layout) error {
 	defer h.Close()
 	if octets, err := json.MarshalIndent(layout, " ", " "); err != nil {
 		return chronograf.ErrLayoutInvalid
-	} else {
-		if _, err := h.Write(octets); err != nil {
-			return err
-		}
+	} else if _, err := h.Write(octets); err != nil {
+		return err
 	}
 	return nil
 }
 
+// All returns all layouts from the directory
 func (a *Apps) All(ctx context.Context) ([]chronograf.Layout, error) {
 	files, err := a.ReadDir(a.Dir)
 	if err != nil {
@@ -91,6 +93,7 @@ func (a *Apps) All(ctx context.Context) ([]chronograf.Layout, error) {
 	return layouts, nil
 }
 
+// Add creates a new layout within the directory
 func (a *Apps) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) {
 	var err error
 	layout.ID, err = a.IDs.Generate()
@@ -118,6 +121,7 @@ func (a *Apps) Add(ctx context.Context, layout chronograf.Layout) (chronograf.La
 	return layout, nil
 }
 
+// Delete removes a layout file from the directory
 func (a *Apps) Delete(ctx context.Context, layout chronograf.Layout) error {
 	_, file, err := a.idToFile(layout.ID)
 	if err != nil {
@@ -134,6 +138,7 @@ func (a *Apps) Delete(ctx context.Context, layout chronograf.Layout) error {
 	return nil
 }
 
+// Get returns an app file from the layout directory
 func (a *Apps) Get(ctx context.Context, ID string) (chronograf.Layout, error) {
 	l, file, err := a.idToFile(ID)
 	if err != nil {
@@ -157,6 +162,7 @@ func (a *Apps) Get(ctx context.Context, ID string) (chronograf.Layout, error) {
 	return l, nil
 }
 
+// Update replaces a layout from the file system directory
 func (a *Apps) Update(ctx context.Context, layout chronograf.Layout) error {
 	l, _, err := a.idToFile(layout.ID)
 	if err != nil {
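The createLayout change above folds an if nested inside an else into an "else if", a behavior-preserving simplification that linters suggest. A small sketch of the same shape, with a hypothetical writeJSON helper standing in for the real function:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
)

var errInvalid = errors.New("value cannot be marshaled")

// writeJSON mirrors createLayout: the write now happens in an "else if"
// instead of an if block nested inside the else.
func writeJSON(w io.Writer, v interface{}) error {
	if octets, err := json.MarshalIndent(v, "", "  "); err != nil {
		return errInvalid
	} else if _, err := w.Write(octets); err != nil {
		return err
	}
	return nil
}

func main() {
	if err := writeJSON(os.Stdout, map[string]string{"name": "example"}); err != nil {
		fmt.Println("write failed:", err)
	}
}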

View File

@@ -10,6 +10,7 @@ import (
 //go:generate go-bindata -o bin_gen.go -ignore README|apps|.sh|go -pkg canned .
 
+// BinLayoutStore represents a layout store using data generated by go-bindata
 type BinLayoutStore struct {
 	Logger chronograf.Logger
 }

dist/dir.go (vendored)
View File

@@ -11,6 +11,7 @@ type Dir struct {
 	dir http.Dir
 }
 
+// NewDir constructs a Dir with a default file
 func NewDir(dir, def string) Dir {
 	return Dir{
 		Default: def,

View File

@@ -148,7 +148,7 @@ func (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) {
 	return c.RolesStore, nil
 }
 
-// Allowances returns all Influx Enterprise permission strings
+// Permissions returns all Influx Enterprise permission strings
 func (c *Client) Permissions(context.Context) chronograf.Permissions {
 	all := chronograf.Allowances{
 		"NoPermissions",

View File

@@ -70,7 +70,7 @@ func TestMetaClient_ShowCluster(t *testing.T) {
 				http.StatusBadGateway,
 				nil,
 				nil,
-				fmt.Errorf("Time circuits on. Flux Capacitor... fluxxing."),
+				fmt.Errorf("time circuits on. Flux Capacitor... fluxxing"),
 			),
 		},
 		wantErr: true,
@@ -214,7 +214,7 @@ func TestMetaClient_Users(t *testing.T) {
 				http.StatusOK,
 				[]byte(`{"users":[{"name":"admin","hash":"1234","permissions":{"":["ViewAdmin","ViewChronograf"]}}]}`),
 				nil,
-				fmt.Errorf("Time circuits on. Flux Capacitor... fluxxing."),
+				fmt.Errorf("time circuits on. Flux Capacitor... fluxxing"),
 			),
 		},
 		args: args{
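The test fixtures above are adjusted for golint's error-string rule: messages start lower-case and drop the trailing period so they read cleanly when wrapped by a caller. A small illustration; the surrounding program is hypothetical, the strings are taken from the diff:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Flagged by golint: capitalized first word and trailing period.
	before := errors.New("Time circuits on. Flux Capacitor... fluxxing.")
	// Preferred: lower-case start, no trailing punctuation; the proper noun
	// "Flux Capacitor" keeps its capitalization.
	after := errors.New("time circuits on. Flux Capacitor... fluxxing")

	fmt.Println(fmt.Errorf("show cluster failed: %v", before))
	fmt.Println(fmt.Errorf("show cluster failed: %v", after))
}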

View File

@@ -6,8 +6,10 @@ import (
 	"github.com/influxdata/chronograf"
 )
 
+// HTTPEndpoint is the default location of the tickscript output
 const HTTPEndpoint = "output"
 
+// HTTPOut adds a kapacitor httpOutput to a tickscript
 func HTTPOut(rule chronograf.AlertRule) (string, error) {
 	return fmt.Sprintf(`trigger|httpOut('%s')`, HTTPEndpoint), nil
 }

View File

@@ -3,8 +3,8 @@ package kapacitor
 import "fmt"
 
 const (
-	GreaterThan      = "greater than"
-	LessThan         = "less than"
+	greaterThan      = "greater than"
+	lessThan         = "less than"
 	LessThanEqual    = "equal to or less than"
 	GreaterThanEqual = "equal to or greater"
 	Equal            = "equal to"
@@ -16,9 +16,9 @@ const (
 // kapaOperator converts UI strings to kapacitor operators
 func kapaOperator(operator string) (string, error) {
 	switch operator {
-	case GreaterThan:
+	case greaterThan:
 		return ">", nil
-	case LessThan:
+	case lessThan:
 		return "<", nil
 	case LessThanEqual:
 		return "<=", nil

View File

@@ -41,6 +41,9 @@ func validateTick(script chronograf.TICKScript) error {
 	return err
 }
 
+// deadman is an empty implementation of a kapacitor DeadmanService to allow CreatePipeline
+var _ pipeline.DeadmanService = &deadman{}
+
 type deadman struct {
 	interval  time.Duration
 	threshold float64
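The new blank-identifier assignment is a compile-time assertion that deadman satisfies pipeline.DeadmanService, the same var _ pattern already used in the bolt and oauth2 packages. A self-contained sketch of the pattern with hypothetical types:

package main

import (
	"fmt"
	"time"
)

// Service is a stand-in interface for something like pipeline.DeadmanService.
type Service interface {
	Interval() time.Duration
}

// noopService is an empty implementation, analogous to the deadman type above.
type noopService struct {
	interval time.Duration
}

func (s noopService) Interval() time.Duration { return s.interval }

// The assignment below compiles to nothing at runtime, but the build breaks
// immediately if noopService ever stops satisfying Service.
var _ Service = noopService{}

func main() {
	fmt.Println(noopService{interval: 10 * time.Second}.Interval())
}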

View File

@@ -41,17 +41,16 @@ func Vars(rule chronograf.AlertRule) (string, error) {
 		var crit = %s
 		`
 		return fmt.Sprintf(vars, common, formatValue(rule.TriggerValues.Value)), nil
-	} else {
-		vars := `
-		%s
-		var lower = %s
-		var upper = %s
-		`
-		return fmt.Sprintf(vars,
-			common,
-			rule.TriggerValues.Value,
-			rule.TriggerValues.RangeValue), nil
 	}
+	vars := `
+	%s
+	var lower = %s
+	var upper = %s
+	`
+	return fmt.Sprintf(vars,
+		common,
+		rule.TriggerValues.Value,
+		rule.TriggerValues.RangeValue), nil
 case Relative:
 	vars := `
 	%s
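The Vars change is golint's "if block ends with a return statement, so drop this else" fix: when the if branch returns, the else can be removed and its body outdented. A minimal sketch of the same control flow with hypothetical values:

package main

import "fmt"

// describe mirrors the shape of the Vars branch above: the threshold case
// returns early, so the range case no longer needs an else block.
func describe(hasRange bool, value, rangeValue string) string {
	if !hasRange {
		return fmt.Sprintf("var crit = %s", value)
	}
	return fmt.Sprintf("var lower = %s\nvar upper = %s", value, rangeValue)
}

func main() {
	fmt.Println(describe(false, "90", ""))
	fmt.Println(describe(true, "10", "20"))
}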

View File

@@ -81,6 +81,7 @@ func (ll *logrusLogger) WithField(key string, value interface{}) chronograf.Logg
 	return &logrusLogger{ll.l.WithField(key, value)}
 }
 
+// New wraps a logrus Logger
 func New(l Level) chronograf.Logger {
 	logger := &logrus.Logger{
 		Out: os.Stderr,

View File

@@ -1,4 +1,4 @@
-// The oauth2 package provides http.Handlers necessary for implementing Oauth2
+// Package oauth2 provides http.Handlers necessary for implementing Oauth2
 // authentication with multiple Providers.
 //
 // This is how the pieces of this package fit together:
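golint expects a package comment to begin with "Package <name>", which is all the doc.go change does. A tiny sketch of the convention using a hypothetical package:

// Package widgets provides helpers for rendering dashboard widgets.
//
// Starting the comment with "Package widgets" keeps golint quiet and lets
// godoc use the first sentence as the package synopsis; a comment that
// begins "The widgets package provides..." would be flagged.
package widgets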

View File

@@ -10,7 +10,7 @@ import (
 	goauth2 "google.golang.org/api/oauth2/v2"
 )
 
-// Endpoint is Google's OAuth 2.0 endpoint.
+// GoogleEndpoint is Google's OAuth 2.0 endpoint.
 // Copied here to remove tons of package dependencies
 var GoogleEndpoint = oauth2.Endpoint{
 	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
@@ -18,6 +18,7 @@ var GoogleEndpoint = oauth2.Endpoint{
 }
 
 var _ Provider = &Google{}
 
+// Google is an oauth2 provider supporting google.
 type Google struct {
 	ClientID     string
 	ClientSecret string

View File

@@ -14,8 +14,8 @@ import (
 var _ Provider = &Heroku{}
 
 const (
-	// Routes required for interacting with Heroku API
-	HEROKU_ACCOUNT_ROUTE string = "https://api.heroku.com/account"
+	// HerokuAccountRoute is required for interacting with Heroku API
+	HerokuAccountRoute string = "https://api.heroku.com/account"
 )
 
 // Heroku is an OAuth2 Provider allowing users to authenticate with Heroku to
@@ -61,13 +61,14 @@ func (h *Heroku) PrincipalID(provider *http.Client) (string, error) {
 		DefaultOrganization DefaultOrg `json:"default_organization"`
 	}
 
-	resp, err := provider.Get(HEROKU_ACCOUNT_ROUTE)
+	resp, err := provider.Get(HerokuAccountRoute)
 	if err != nil {
 		h.Logger.Error("Unable to communicate with Heroku. err:", err)
 		return "", err
 	}
 	defer resp.Body.Close()
 	d := json.NewDecoder(resp.Body)
 	var account Account
 	if err := d.Decode(&account); err != nil {
 		h.Logger.Error("Unable to decode response from Heroku. err:", err)
@@ -83,9 +84,8 @@ func (h *Heroku) PrincipalID(provider *http.Client) (string, error) {
 		}
 		h.Logger.Error(ErrOrgMembership)
 		return "", ErrOrgMembership
-	} else {
-		return account.Email, nil
 	}
+	return account.Email, nil
 }
 
 // Scopes for heroku is "identity" which grants access to user account
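Two lint conventions meet in heroku.go: the ALL_CAPS constant becomes MixedCaps (Go names do not use underscores), and the trailing else after a return is dropped. A short sketch of the renaming side; the URL is copied from the diff, everything else is illustrative:

package main

import "fmt"

// HerokuAccountRoute is required for interacting with Heroku API
const HerokuAccountRoute string = "https://api.heroku.com/account"

func main() {
	// Call sites change mechanically, e.g. provider.Get(HEROKU_ACCOUNT_ROUTE)
	// becomes provider.Get(HerokuAccountRoute).
	fmt.Println(HerokuAccountRoute)
}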

View File

@@ -24,6 +24,7 @@ type cookie struct {
 // Check to ensure CookieMux is an oauth2.Mux
 var _ Mux = &CookieMux{}
 
+// NewCookieMux constructs a Mux handler that checks a cookie against the authenticator
 func NewCookieMux(p Provider, a Authenticator, l chronograf.Logger) *CookieMux {
 	return &CookieMux{
 		Provider: p,
@@ -55,7 +56,7 @@ type CookieMux struct {
 	Now func() time.Time // Now returns the current time
 }
 
-// Uses a Cookie with a random string as the state validation method. JWTs are
+// Login uses a Cookie with a random string as the state validation method. JWTs are
 // a good choice here for encoding because they can be validated without
 // storing state.
 func (j *CookieMux) Login() http.Handler {

View File

@@ -27,8 +27,8 @@ func (mp *MockProvider) Config() *goauth.Config {
 		ClientID:     "4815162342",
 		ClientSecret: "8675309",
 		Endpoint: goauth.Endpoint{
-			mp.ProviderURL + "/oauth/auth",
-			mp.ProviderURL + "/oauth/token",
+			AuthURL:  mp.ProviderURL + "/oauth/auth",
+			TokenURL: mp.ProviderURL + "/oauth/token",
 		},
 	}
 }
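The MockProvider fix addresses go vet's composite-literal check: struct literals for types from another package should name their fields, so an upstream field reordering cannot silently swap values. A stand-alone sketch with a local Endpoint type mirroring the two fields used here:

package main

import "fmt"

// Endpoint is a local stand-in for goauth.Endpoint, not the real oauth2 type.
type Endpoint struct {
	AuthURL  string
	TokenURL string
}

func main() {
	providerURL := "https://example.com"

	// Unkeyed form (what vet flags for cross-package structs):
	//   goauth.Endpoint{providerURL + "/oauth/auth", providerURL + "/oauth/token"}
	ep := Endpoint{
		AuthURL:  providerURL + "/oauth/auth",
		TokenURL: providerURL + "/oauth/token",
	}
	fmt.Println(ep.AuthURL, ep.TokenURL)
}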

View File

@@ -73,14 +73,17 @@ func provide(p oauth2.Provider, m oauth2.Mux, ok func() bool) func(func(oauth2.P
 	}
 }
 
+// UseGithub validates the CLI parameters to enable github oauth support
 func (s *Server) UseGithub() bool {
 	return s.TokenSecret != "" && s.GithubClientID != "" && s.GithubClientSecret != ""
 }
 
+// UseGoogle validates the CLI parameters to enable google oauth support
 func (s *Server) UseGoogle() bool {
 	return s.TokenSecret != "" && s.GoogleClientID != "" && s.GoogleClientSecret != "" && s.PublicURL != ""
 }
 
+// UseHeroku validates the CLI parameters to enable heroku oauth support
 func (s *Server) UseHeroku() bool {
 	return s.TokenSecret != "" && s.HerokuClientID != "" && s.HerokuSecret != ""
 }

View File

@@ -62,7 +62,8 @@ func (wrw *wrapResponseWriter) Header() http.Header {
 	return *wrw.dupHeader
 }
 
-const CHUNK_SIZE int = 512
+// ChunkSize is the number of bytes per chunked transfer-encoding
+const ChunkSize int = 512
 
 // ServeHTTP implements an http.Handler that prefixes relative URLs from the
 // Next handler with the configured prefix. It does this by examining the
@@ -109,7 +110,7 @@ func (up *URLPrefixer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		writtenCount++
 		buf.Write(src.Bytes())
-		if writtenCount >= CHUNK_SIZE {
+		if writtenCount >= ChunkSize {
 			flusher.Flush()
 			writtenCount = 0
 		}

View File

@@ -4,6 +4,7 @@ import (
 	"net/http"
 )
 
+// Version handler adds X-Chronograf-Version header to responses
 func Version(version string, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Add("X-Chronograf-Version", version)