chore: delete more unused chronograf code (#21881)

pull/21889/head
Daniel Moran 2021-07-19 16:48:37 -04:00 committed by GitHub
parent 9ff953c400
commit 94738dbf34
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
91 changed files with 0 additions and 31008 deletions

View File

@ -1,120 +0,0 @@
package main
import (
"context"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
)
// AddCommand holds the CLI flags for the add-superadmin subcommand, which
// creates (or promotes) a superadmin user in a Chronograf v1 boltDB file.
type AddCommand struct {
	BoltPath      string  `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"`
	ID            *uint64 `short:"i" long:"id" description:"Users ID. Must be id for existing user"`
	Username      string  `short:"n" long:"name" description:"Users name. Must be Oauth-able email address or username"`
	Provider      string  `short:"p" long:"provider" description:"Name of the Auth provider (e.g. google, github, auth0, or generic)"`
	Scheme        string  `short:"s" long:"scheme" description:"Authentication scheme that matches auth provider (e.g. oauth2)" default:"oauth2"`
	Organizations string  `short:"o" long:"orgs" description:"A comma separated list of organizations that the user should be added to" default:"default"`
}

// addCommand is the flag target registered with the parser in this file's init.
var addCommand AddCommand
// Execute runs the add-superadmin command: it looks up the user identified
// by the flags, creates it as a superadmin if it does not exist (or promotes
// it if it does), then adds the user to each requested organization using
// that organization's default role, and finally prints the resulting user.
func (l *AddCommand) Execute(args []string) error {
	c, err := NewBoltClient(l.BoltPath)
	if err != nil {
		return err
	}
	defer c.Close()

	q := chronograf.UserQuery{
		Name:     &l.Username,
		Provider: &l.Provider,
		Scheme:   &l.Scheme,
	}
	// An explicit ID participates in the lookup only when provided.
	if l.ID != nil {
		q.ID = l.ID
	}

	ctx := context.Background()
	user, err := c.UsersStore.Get(ctx, q)
	if err != nil && err != chronograf.ErrUserNotFound {
		return err
	} else if err == chronograf.ErrUserNotFound {
		// No such user yet: create one as a superadmin with a default
		// "member" role in the default organization.
		user = &chronograf.User{
			Name:     l.Username,
			Provider: l.Provider,
			Scheme:   l.Scheme,
			Roles: []chronograf.Role{
				{
					Name:         "member",
					Organization: "default",
				},
			},
			SuperAdmin: true,
		}
		user, err = c.UsersStore.Add(ctx, user)
		if err != nil {
			return err
		}
	} else {
		// Existing user: promote to superadmin, backfilling a default
		// role if the user currently has none.
		user.SuperAdmin = true
		if len(user.Roles) == 0 {
			user.Roles = []chronograf.Role{
				{
					Name:         "member",
					Organization: "default",
				},
			}
		}
		if err = c.UsersStore.Update(ctx, user); err != nil {
			return err
		}
	}

	// TODO(desa): Apply mapping to user and update their roles
	roles := []chronograf.Role{}
OrgLoop:
	for _, org := range strings.Split(l.Organizations, ",") {
		// Check to see if the user is already a part of the organization.
		for _, r := range user.Roles {
			if r.Organization == org {
				continue OrgLoop
			}
		}

		orgQuery := chronograf.OrganizationQuery{
			ID: &org,
		}
		o, err := c.OrganizationsStore.Get(ctx, orgQuery)
		if err != nil {
			return err
		}

		// New memberships get the organization's configured default role.
		role := chronograf.Role{
			Organization: org,
			Name:         o.DefaultRole,
		}
		roles = append(roles, role)
	}

	user.Roles = append(user.Roles, roles...)
	if err = c.UsersStore.Update(ctx, user); err != nil {
		return err
	}

	// Print the resulting user in the same tabular format as list-users.
	w := NewTabWriter()
	WriteHeaders(w)
	WriteUser(w, user)
	w.Flush()

	return nil
}
// init registers the add-superadmin subcommand with the shared flag parser.
func init() {
	parser.AddCommand("add-superadmin",
		"Creates a new superadmin user",
		"The add-user command will create a new user with superadmin status",
		&addCommand)
}

View File

@ -1,41 +0,0 @@
package main
import (
"context"
)
// ListCommand holds the CLI flags for the list-users subcommand.
type ListCommand struct {
	BoltPath string `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"`
}

// listCommand is the flag target registered with the parser in this file's init.
var listCommand ListCommand
// Execute runs the list-users command: it opens the boltDB at the
// configured path, fetches every user, and prints them as a table.
func (l *ListCommand) Execute(args []string) error {
	client, err := NewBoltClient(l.BoltPath)
	if err != nil {
		return err
	}
	defer client.Close()

	users, err := client.UsersStore.All(context.Background())
	if err != nil {
		return err
	}

	w := NewTabWriter()
	WriteHeaders(w)
	for i := range users {
		WriteUser(w, &users[i])
	}
	w.Flush()

	return nil
}
// init registers the list-users subcommand with the shared flag parser.
func init() {
	parser.AddCommand("list-users",
		"Lists users",
		"The list-users command will list all users in the chronograf boltdb instance",
		&listCommand)
}

View File

@ -1,27 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/jessevdk/go-flags"
)
// Options holds global flags shared by all chronoctl subcommands
// (none are currently defined).
type Options struct {
}

// options is the global flag target; parser is the shared command parser
// that each subcommand registers itself with in its init function.
var options Options
var parser = flags.NewParser(&options, flags.Default)
// main parses the command line and dispatches to the registered
// subcommand, exiting 0 on --help and 1 on any other parse failure.
func main() {
	_, err := parser.Parse()
	if err == nil {
		return
	}
	if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
		os.Exit(0)
	}
	fmt.Fprintln(os.Stdout)
	parser.WriteHelp(os.Stdout)
	os.Exit(1)
}

View File

@ -1,44 +0,0 @@
package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// NewBoltClient opens the Chronograf v1 boltDB at path and returns a
// ready-to-use client. The caller is responsible for calling Close.
func NewBoltClient(path string) (*bolt.Client, error) {
	c := bolt.NewClient()
	c.Path = path

	ctx := context.Background()
	logger := mocks.NewLogger()
	// BuildInfo is left zero-valued; the CLI has no version info to record.
	var bi chronograf.BuildInfo
	if err := c.Open(ctx, logger, bi); err != nil {
		return nil, err
	}
	return c, nil
}
// NewTabWriter returns a tabwriter that renders tab-separated columns to
// stdout, padding cells with tab characters.
func NewTabWriter() *tabwriter.Writer {
	const (
		minWidth = 0
		tabWidth = 8
		padding  = 1
	)
	return tabwriter.NewWriter(os.Stdout, minWidth, tabWidth, padding, '\t', 0)
}
func WriteHeaders(w io.Writer) {
fmt.Fprintln(w, "ID\tName\tProvider\tScheme\tSuperAdmin\tOrganization(s)")
}
// WriteUser prints a single user row: ID, name, provider, scheme,
// superadmin flag, and a comma-separated list of the user's organizations.
func WriteUser(w io.Writer, user *chronograf.User) {
	orgs := make([]string, 0, len(user.Roles))
	for _, role := range user.Roles {
		orgs = append(orgs, role.Organization)
	}
	fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%t\t%s\n",
		user.ID, user.Name, user.Provider, user.Scheme, user.SuperAdmin, strings.Join(orgs, ","))
}

View File

@ -1,50 +0,0 @@
package main
import (
"context"
"log"
"os"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/server"
flags "github.com/jessevdk/go-flags"
)
// Build flags: version and commit are empty by default and are expected to
// be injected by the build (e.g. via -ldflags -X) — TODO confirm against
// the build scripts.
var (
	version = ""
	commit  = ""
)
// main parses server flags, handles --version, and runs the Chronograf
// server until it exits.
func main() {
	srv := server.Server{
		BuildInfo: chronograf.BuildInfo{
			Version: version,
			Commit:  commit,
		},
	}

	parser := flags.NewParser(&srv, flags.Default)
	parser.ShortDescription = `Chronograf`
	parser.LongDescription = `Options for Chronograf`

	if _, err := parser.Parse(); err != nil {
		code := 1
		// --help is not a failure: exit 0 in that case.
		if fe, ok := err.(*flags.Error); ok {
			if fe.Type == flags.ErrHelp {
				code = 0
			}
		}
		os.Exit(code)
	}

	if srv.ShowVersion {
		log.Printf("Chronograf %s (git: %s)\n", version, commit)
		os.Exit(0)
	}

	ctx := context.Background()
	if err := srv.Serve(ctx); err != nil {
		log.Fatalln(err)
	}
}

View File

@ -1,26 +0,0 @@
# Makefile for the chronograf dist package: regenerates the go-bindata
# asset file (dist_gen.go) from the built UI.

# List any generated files here
TARGETS = dist_gen.go
# List any source files used to generate the targets here
SOURCES = dist.go $(shell find ../../ui/build -type f)
# List any directories that have their own Makefile here
SUBDIRS =

# Default target
all: $(SUBDIRS) $(TARGETS)

# Recurse into subdirs for same make goal
$(SUBDIRS):
	$(MAKE) -C $@ $(MAKECMDGOALS)

# Clean all targets recursively
clean: $(SUBDIRS)
	rm -f $(TARGETS)

# Define go generate if not already defined
GO_GENERATE := go generate

# Run go generate for the targets
$(TARGETS): $(SOURCES)
	$(GO_GENERATE) -x

.PHONY: all clean $(SUBDIRS)

View File

@ -1,26 +0,0 @@
package dist
import (
"errors"
"os"
)
// The functions defined in this file are placeholders
// when the binary is compiled without assets.
// errNoAssets is returned by every placeholder accessor below. These stand
// in for the generated bindata functions when the binary is built without
// embedded UI assets.
var errNoAssets = errors.New("no assets included in binary")

// Asset returns an error stating no assets were included in the binary.
func Asset(name string) ([]byte, error) {
	return nil, errNoAssets
}

// AssetInfo returns an error stating no assets were included in the binary.
func AssetInfo(name string) (os.FileInfo, error) {
	return nil, errNoAssets
}

// AssetDir returns an error stating no assets were included in the binary.
func AssetDir(name string) ([]string, error) {
	return nil, errNoAssets
}

View File

@ -1,33 +0,0 @@
package dist
import (
"net/http"
"os"
)
// Dir functions like http.Dir except returns the content of a default file if not found.
type Dir struct {
Default string
dir http.Dir
}
// NewDir constructs a Dir with a default file
func NewDir(dir, def string) Dir {
return Dir{
Default: def,
dir: http.Dir(dir),
}
}
// Open will return the file in the dir if it exists, or, the Default file otherwise.
func (d Dir) Open(name string) (http.File, error) {
f, err := d.dir.Open(name)
if err != nil {
f, err = os.Open(d.Default)
if err != nil {
return nil, err
}
return f, nil
}
return f, err
}

View File

@ -1,88 +0,0 @@
package dist
//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o dist_gen.go -ignore 'map|go' -tags assets -pkg dist ../../ui/build/...
import (
"fmt"
"net/http"
assetfs "github.com/elazarl/go-bindata-assetfs"
)
// DebugAssets serves assets from a directory on disk (development mode).
type DebugAssets struct {
	Dir     string // Dir is a directory location of asset files
	Default string // Default is the file to serve if file is not found.
}

// Handler is an http.FileServer for the Dir, falling back to Default for
// missing files (see NewDir).
func (d *DebugAssets) Handler() http.Handler {
	return http.FileServer(NewDir(d.Dir, d.Default))
}
// BindataAssets serves assets from go-bindata, but also serves Default if
// the asset doesn't exist. This is to support single-page react-apps with
// their own router.
type BindataAssets struct {
	Prefix             string // Prefix is prepended to the http file request
	Default            string // Default is the file to serve if the file is not found
	DefaultContentType string // DefaultContentType is the content type of the default file
}

// Handler serves go-bindata using a go-bindata-assetfs façade; the type
// itself is the handler (it implements http.Handler via ServeHTTP).
func (b *BindataAssets) Handler() http.Handler {
	return b
}
// addCacheHeaders requests an hour of Cache-Control and sets an ETag based
// on the asset's size and modification time.
func (b *BindataAssets) addCacheHeaders(filename string, w http.ResponseWriter) error {
	w.Header().Add("Cache-Control", "public, max-age=3600")
	fi, err := AssetInfo(filename)
	if err != nil {
		return err
	}

	hour, minute, second := fi.ModTime().Clock()
	// NOTE(review): concatenating unseparated %d fields means distinct
	// (size, time) combinations can produce the same ETag digits;
	// collisions are unlikely but possible.
	etag := fmt.Sprintf(`"%d%d%d%d%d"`, fi.Size(), fi.ModTime().Day(), hour, minute, second)
	w.Header().Set("ETag", etag)

	return nil
}
// ServeHTTP wraps http.FileServer by returning a default asset if the asset
// doesn't exist. This supports single-page react-apps with its own
// built-in router. Additionally, we override the content-type if the
// Default file is used.
func (b *BindataAssets) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// def wraps the assets to return the default file if the file doesn't exist
	def := func(name string) ([]byte, error) {
		// If the named asset exists, then return it directly.
		octets, err := Asset(name)
		if err != nil {
			// If this is at / then we just error out so we can return a Directory
			// This directory will then be redirected by go to the /index.html
			if name == b.Prefix {
				return nil, err
			}
			// If this is anything other than slash, we just return the default
			// asset. This default asset will handle the routing.
			// Additionally, because we know we are returning the default asset,
			// we need to set the default asset's content-type.
			w.Header().Set("Content-Type", b.DefaultContentType)
			if err := b.addCacheHeaders(b.Default, w); err != nil {
				return nil, err
			}
			return Asset(b.Default)
		}
		if err := b.addCacheHeaders(name, w); err != nil {
			return nil, err
		}
		return octets, nil
	}
	// Serve through an assetfs façade whose Asset function is the fallback
	// wrapper above; directory and stat lookups still come from bindata.
	var dir http.FileSystem = &assetfs.AssetFS{
		Asset:     def,
		AssetDir:  AssetDir,
		AssetInfo: AssetInfo,
		Prefix:    b.Prefix,
	}
	http.FileServer(dir).ServeHTTP(w, r)
}

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +0,0 @@
{
"id": "5000",
"srcID": "5000",
"name": "Kapa 1",
"url": "http://localhost:9092",
"active": true,
"organization": "howdy"
}

View File

@ -1,5 +0,0 @@
{
"id": "howdy",
"name": "An Organization",
"defaultRole": "viewer"
}

View File

@ -1,14 +0,0 @@
{
"id": "5000",
"name": "Influx 1",
"username": "user1",
"password": "pass1",
"url": "http://localhost:8086",
"metaUrl": "http://metaurl.com",
"type": "influx-enterprise",
"insecureSkipVerify": false,
"default": true,
"telegraf": "telegraf",
"sharedSecret": "cubeapples",
"organization": "howdy"
}

View File

@ -1,189 +0,0 @@
{
"id": 1000,
"cells": [
{
"i": "8f61c619-dd9b-4761-8aa8-577f27247093",
"x": 0,
"y": 0,
"w": 11,
"h": 5,
"name": "Untitled Cell",
"queries": [
{
"query": "SELECT mean(\"value\") AS \"mean_value\" FROM \"telegraf\".\"autogen\".\"cpg\" WHERE time \u003e :dashboardTime: GROUP BY time(:interval:) FILL(null)",
"queryConfig": {
"id": "b20baa61-bacb-4a17-b27d-b904a0d18114",
"database": "telegraf",
"measurement": "cpg",
"retentionPolicy": "autogen",
"fields": [
{
"value": "mean",
"type": "func",
"alias": "mean_value",
"args": [
{
"value": "value",
"type": "field",
"alias": ""
}
]
}
],
"tags": {},
"groupBy": {
"time": "auto",
"tags": []
},
"areTagsAccepted": true,
"fill": "null",
"rawText": null,
"range": null,
"shifts": []
},
"source": "/chronograf/v1/sources/2"
}
],
"axes": {
"x": {
"bounds": [],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line",
"colors": [
{
"id": "0",
"type": "min",
"hex": "#00C9FF",
"name": "laser",
"value": "0"
},
{
"id": "1",
"type": "max",
"hex": "#9394FF",
"name": "comet",
"value": "100"
}
],
"legend": {
"type": "static",
"orientation": "bottom"
}
}
],
"templates": [
{
"tempVar": ":dbs:",
"values": [
{
"value": "_internal",
"type": "database",
"selected": true
},
{
"value": "telegraf",
"type": "database",
"selected": false
},
{
"value": "tensorflowdb",
"type": "database",
"selected": false
},
{
"value": "pushgateway",
"type": "database",
"selected": false
},
{
"value": "node_exporter",
"type": "database",
"selected": false
},
{
"value": "mydb",
"type": "database",
"selected": false
},
{
"value": "tiny",
"type": "database",
"selected": false
},
{
"value": "blah",
"type": "database",
"selected": false
},
{
"value": "test",
"type": "database",
"selected": false
},
{
"value": "chronograf",
"type": "database",
"selected": false
},
{
"value": "db_name",
"type": "database",
"selected": false
},
{
"value": "demo",
"type": "database",
"selected": false
},
{
"value": "eeg",
"type": "database",
"selected": false
},
{
"value": "solaredge",
"type": "database",
"selected": false
},
{
"value": "zipkin",
"type": "database",
"selected": false
}
],
"id": "e7e498bf-5869-4874-9071-24628a2cda63",
"type": "databases",
"label": "",
"query": {
"influxql": "SHOW DATABASES",
"measurement": "",
"tagKey": "",
"fieldKey": ""
}
}
],
"name": "Name This Dashboard",
"organization": "howdy"
}

View File

@ -1,54 +0,0 @@
package integrations
import (
"encoding/json"
"io/ioutil"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"github.com/google/go-cmp/cmp"
)
// hostAndPort obtains a usable localhost address by briefly starting (and
// immediately closing) an httptest server, then splitting its URL into a
// host and numeric port. It panics on any failure since it is test-only.
func hostAndPort() (string, int) {
	s := httptest.NewServer(nil)
	defer s.Close()

	u, err := url.Parse(s.URL)
	if err != nil {
		panic(err)
	}

	parts := strings.Split(u.Host, ":")
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		panic(err)
	}
	return parts[0], port
}
func newBoltFile() string {
f, err := ioutil.TempFile("", "chronograf-bolt-")
if err != nil {
panic(err)
}
f.Close()
return f.Name()
}
// jsonEqual reports whether two JSON documents are semantically equal,
// ignoring formatting and object key order. It returns an error if either
// input is not valid JSON.
func jsonEqual(s1, s2 string) (bool, error) {
	var a, b interface{}
	if err := json.Unmarshal([]byte(s1), &a); err != nil {
		return false, err
	}
	if err := json.Unmarshal([]byte(s2), &b); err != nil {
		return false, err
	}
	return cmp.Equal(a, b), nil
}

View File

@ -1,26 +0,0 @@
# Makefile for the chronograf server package: regenerates the embedded
# swagger asset file (swagger_gen.go) from swagger.json.

# List any generated files here
TARGETS = swagger_gen.go
# List any source files used to generate the targets here
SOURCES = swagger.json swagger.go
# List any directories that have their own Makefile here
SUBDIRS =

# Default target
all: $(SUBDIRS) $(TARGETS)

# Recurse into subdirs for same make goal
$(SUBDIRS):
	$(MAKE) -C $@ $(MAKECMDGOALS)

# Clean all targets recursively
clean: $(SUBDIRS)
	rm -f $(TARGETS)

# Define go generate if not already defined
GO_GENERATE := go generate

# Run go generate for the targets
$(TARGETS): $(SOURCES)
	$(GO_GENERATE) -x

.PHONY: all clean $(SUBDIRS)

View File

@ -1,13 +0,0 @@
package server
import (
"errors"
)
// The functions defined in this file are placeholders when the binary is compiled
// without assets.
// Asset returns an error stating no assets were included in the binary.
// Asset is the placeholder for the generated asset accessor, used when the
// binary is compiled without embedded assets; it always fails.
func Asset(name string) ([]byte, error) {
	return nil, errors.New("no assets included in binary")
}

View File

@ -1,452 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
const (
	// since and until are the query-string parameter names bounding the
	// annotation time range.
	since = "since"
	until = "until"
	// timeMilliFormat is RFC3339 with millisecond precision — the wire
	// format for annotation timestamps.
	timeMilliFormat = "2006-01-02T15:04:05.999Z07:00"
)

// annotationLinks holds hypermedia links for a single annotation.
type annotationLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}

// annotationResponse is the JSON representation of one annotation.
type annotationResponse struct {
	ID        string          `json:"id"`        // ID is the unique annotation identifier
	StartTime string          `json:"startTime"` // StartTime in RFC3339 of the start of the annotation
	EndTime   string          `json:"endTime"`   // EndTime in RFC3339 of the end of the annotation
	Text      string          `json:"text"`      // Text is the associated user-facing text describing the annotation
	Type      string          `json:"type"`      // Type describes the kind of annotation
	Links     annotationLinks `json:"links"`
}
// newAnnotationResponse renders a single annotation, formatting times in
// timeMilliFormat (UTC) and building a self link under the owning source.
func newAnnotationResponse(src chronograf.Source, a *chronograf.Annotation) annotationResponse {
	base := "/chronograf/v1/sources"
	res := annotationResponse{
		ID:        a.ID,
		StartTime: a.StartTime.UTC().Format(timeMilliFormat),
		EndTime:   a.EndTime.UTC().Format(timeMilliFormat),
		Text:      a.Text,
		Type:      a.Type,
		Links: annotationLinks{
			Self: fmt.Sprintf("%s/%d/annotations/%s", base, src.ID, a.ID),
		},
	}

	// A zero EndTime is rendered as an empty string rather than the zero
	// timestamp.
	if a.EndTime.IsZero() {
		res.EndTime = ""
	}

	return res
}
// annotationsResponse is the JSON envelope for a list of annotations.
type annotationsResponse struct {
	Annotations []annotationResponse `json:"annotations"`
}

// newAnnotationsResponse renders every annotation for the given source.
func newAnnotationsResponse(src chronograf.Source, as []chronograf.Annotation) annotationsResponse {
	annotations := make([]annotationResponse, len(as))
	for i, a := range as {
		annotations[i] = newAnnotationResponse(src, &a)
	}
	return annotationsResponse{
		Annotations: annotations,
	}
}
// validAnnotationQuery extracts the annotation time range from the query
// string. The "since" parameter is required; "until" defaults to now. The
// bounds are swapped if given in reverse order, so on success the first
// returned time is never after the second.
func validAnnotationQuery(query url.Values) (time.Time, time.Time, error) {
	start := query.Get(since)
	if start == "" {
		return time.Time{}, time.Time{}, fmt.Errorf("since parameter is required")
	}

	startTime, err := time.Parse(timeMilliFormat, start)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}

	// If until isn't stated, the window extends to now.
	stopTime := time.Now()
	if stop := query.Get(until); stop != "" {
		stopTime, err = time.Parse(timeMilliFormat, stop)
		if err != nil {
			return time.Time{}, time.Time{}, err
		}
	}

	if startTime.After(stopTime) {
		startTime, stopTime = stopTime, startTime
	}
	return startTime, stopTime, nil
}
// Annotations returns all annotations within the annotations store of the
// given source, bounded by the since/until query parameters.
func (s *Service) Annotations(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	start, stop, err := validAnnotationQuery(r.URL.Query())
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	// Annotations are persisted in the source's own time series database.
	store := influx.NewAnnotationStore(ts)
	annotations, err := store.All(ctx, start, stop)
	if err != nil {
		msg := fmt.Errorf("error loading annotations: %v", err)
		unknownErrorWithMessage(w, msg, s.Logger)
		return
	}

	res := newAnnotationsResponse(src, annotations)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// Annotation returns a specified annotation id within the annotations store
// of the given source.
func (s *Service) Annotation(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	annoID, err := paramStr("aid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	store := influx.NewAnnotationStore(ts)
	anno, err := store.Get(ctx, annoID)
	if err != nil {
		// Not-found maps to a 400 here; any other store failure is a 500.
		if err != chronograf.ErrAnnotationNotFound {
			msg := fmt.Errorf("error loading annotation: %v", err)
			unknownErrorWithMessage(w, msg, s.Logger)
			return
		}
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	res := newAnnotationResponse(src, anno)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// newAnnotationRequest is the POST body for creating an annotation; both
// timestamps are required.
type newAnnotationRequest struct {
	StartTime time.Time
	EndTime   time.Time
	Text      string `json:"text,omitempty"` // Text is the associated user-facing text describing the annotation
	Type      string `json:"type,omitempty"` // Type describes the kind of annotation
}

// UnmarshalJSON decodes the request, parsing both timestamps from
// timeMilliFormat strings and swapping them if they arrive out of order.
func (ar *newAnnotationRequest) UnmarshalJSON(data []byte) error {
	// Alias sheds this method so unmarshalling it does not recurse.
	type Alias newAnnotationRequest
	aux := &struct {
		StartTime string `json:"startTime"` // StartTime is the time in rfc3339 milliseconds
		EndTime   string `json:"endTime"`   // EndTime is the time in rfc3339 milliseconds
		*Alias
	}{
		Alias: (*Alias)(ar),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	var err error
	ar.StartTime, err = time.Parse(timeMilliFormat, aux.StartTime)
	if err != nil {
		return err
	}

	ar.EndTime, err = time.Parse(timeMilliFormat, aux.EndTime)
	if err != nil {
		return err
	}

	// Normalize so StartTime is never after EndTime.
	if ar.StartTime.After(ar.EndTime) {
		ar.StartTime, ar.EndTime = ar.EndTime, ar.StartTime
	}

	return nil
}
// Annotation converts the validated request into the store's annotation
// type; the ID field is not populated here.
func (ar *newAnnotationRequest) Annotation() *chronograf.Annotation {
	return &chronograf.Annotation{
		StartTime: ar.StartTime,
		EndTime:   ar.EndTime,
		Text:      ar.Text,
		Type:      ar.Type,
	}
}
// NewAnnotation adds the annotation from a POST body to the annotations
// store of the given source and responds 201 with the created annotation.
func (s *Service) NewAnnotation(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	var req newAnnotationRequest
	if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}

	store := influx.NewAnnotationStore(ts)
	anno, err := store.Add(ctx, req.Annotation())
	if err != nil {
		if err == chronograf.ErrUpstreamTimeout {
			msg := "Timeout waiting for response"
			Error(w, http.StatusRequestTimeout, msg, s.Logger)
			return
		}
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	res := newAnnotationResponse(src, anno)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusCreated, res, s.Logger)
}
// RemoveAnnotation removes the annotation from the time series source and
// responds 204 on success.
func (s *Service) RemoveAnnotation(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	annoID, err := paramStr("aid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	store := influx.NewAnnotationStore(ts)
	if err = store.Delete(ctx, annoID); err != nil {
		if err == chronograf.ErrUpstreamTimeout {
			msg := "Timeout waiting for response"
			Error(w, http.StatusRequestTimeout, msg, s.Logger)
			return
		}
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
// updateAnnotationRequest is the partial-update body for an annotation;
// all fields are optional but at least one must be present.
type updateAnnotationRequest struct {
	StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the time in rfc3339 milliseconds
	EndTime   *time.Time `json:"endTime,omitempty"`   // EndTime is the time in rfc3339 milliseconds
	Text      *string    `json:"text,omitempty"`      // Text is the associated user-facing text describing the annotation
	Type      *string    `json:"type,omitempty"`      // Type describes the kind of annotation
}

// TODO: make sure that endtime is after starttime

// UnmarshalJSON decodes the partial update, parsing whichever timestamps
// are present from timeMilliFormat, and rejects an empty update.
func (u *updateAnnotationRequest) UnmarshalJSON(data []byte) error {
	// Alias sheds this method so unmarshalling it does not recurse.
	type Alias updateAnnotationRequest
	aux := &struct {
		StartTime *string `json:"startTime,omitempty"`
		EndTime   *string `json:"endTime,omitempty"`
		*Alias
	}{
		Alias: (*Alias)(u),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	if aux.StartTime != nil {
		tm, err := time.Parse(timeMilliFormat, *aux.StartTime)
		if err != nil {
			return err
		}
		u.StartTime = &tm
	}

	if aux.EndTime != nil {
		tm, err := time.Parse(timeMilliFormat, *aux.EndTime)
		if err != nil {
			return err
		}
		u.EndTime = &tm
	}

	// Update must have at least one field set
	if u.StartTime == nil && u.EndTime == nil && u.Text == nil && u.Type == nil {
		return fmt.Errorf("update request must have at least one field")
	}

	return nil
}
// UpdateAnnotation overwrites fields of an existing annotation with any
// fields present in the request body and responds with the updated record.
func (s *Service) UpdateAnnotation(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	annoID, err := paramStr("aid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", id, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	// Fetch the current record first so unspecified fields are preserved.
	store := influx.NewAnnotationStore(ts)
	cur, err := store.Get(ctx, annoID)
	if err != nil {
		notFound(w, annoID, s.Logger)
		return
	}

	var req updateAnnotationRequest
	if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}

	if req.StartTime != nil {
		cur.StartTime = *req.StartTime
	}
	if req.EndTime != nil {
		cur.EndTime = *req.EndTime
	}
	if req.Text != nil {
		cur.Text = *req.Text
	}
	if req.Type != nil {
		cur.Type = *req.Type
	}

	if err = store.Update(ctx, cur); err != nil {
		if err == chronograf.ErrUpstreamTimeout {
			msg := "Timeout waiting for response"
			Error(w, http.StatusRequestTimeout, msg, s.Logger)
			return
		}
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	res := newAnnotationResponse(src, cur)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}

View File

@ -1,192 +0,0 @@
package server
import (
"bytes"
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestService_Annotations exercises the Annotations handler end to end
// against mocked stores and time-series clients, asserting on the raw HTTP
// response body for each case.
func TestService_Annotations(t *testing.T) {
	type fields struct {
		Store            DataStore
		TimeSeriesClient TimeSeriesClient
	}
	tests := []struct {
		name   string
		fields fields
		w      *httptest.ResponseRecorder
		r      *http.Request
		ID     string // value injected as the :id router parameter
		want   string // exact expected response body
	}{
		{
			name: "error no id",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":422,"message":"error converting ID "}`,
		},
		{
			name: "no since parameter",
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":422,"message":"since parameter is required"}`,
		},
		{
			name: "invalid since parameter",
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=howdy", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":422,"message":"parsing time \"howdy\" as \"2006-01-02T15:04:05.999Z07:00\": cannot parse \"howdy\" as \"2006\""}`,
		},
		{
			name: "error is returned when get is an error",
			fields: fields{
				Store: &mocks.Store{
					SourcesStore: &mocks.SourcesStore{
						GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
							return chronograf.Source{}, fmt.Errorf("error")
						},
					},
				},
			},
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":404,"message":"ID 1 not found"}`,
		},
		{
			name: "error is returned connect is an error",
			fields: fields{
				Store: &mocks.Store{
					SourcesStore: &mocks.SourcesStore{
						GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
							return chronograf.Source{
								ID: ID,
							}, nil
						},
					},
				},
				TimeSeriesClient: &mocks.TimeSeries{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return fmt.Errorf("error)")
					},
				},
			},
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":400,"message":"unable to connect to source 1: error)"}`,
		},
		{
			name: "error returned when annotations are invalid",
			fields: fields{
				Store: &mocks.Store{
					SourcesStore: &mocks.SourcesStore{
						GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
							return chronograf.Source{
								ID: ID,
							}, nil
						},
					},
				},
				TimeSeriesClient: &mocks.TimeSeries{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`{[]}`, nil), nil
					},
				},
			},
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":500,"message":"unknown error: error loading annotations: invalid character '[' looking for beginning of object key string"}`,
		},
		{
			name: "error is returned connect is an error",
			fields: fields{
				Store: &mocks.Store{
					SourcesStore: &mocks.SourcesStore{
						GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
							return chronograf.Source{
								ID: ID,
							}, nil
						},
					},
				},
				TimeSeriesClient: &mocks.TimeSeries{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					QueryF: func(context.Context, chronograf.Query) (chronograf.Response, error) {
						return mocks.NewResponse(`[
{
"series": [
{
"name": "annotations",
"columns": [
"time",
"start_time",
"modified_time_ns",
"text",
"type",
"id"
],
"values": [
[
1516920177345000000,
0,
1516989242129417403,
"mytext",
"mytype",
"ea0aa94b-969a-4cd5-912a-5db61d502268"
]
]
}
]
}
]`, nil), nil
					},
				},
			},
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("GET", "/chronograf/v1/sources/1/annotations?since=1985-04-12T23:20:50.52Z", bytes.NewReader([]byte(`howdy`))),
			want: `{"annotations":[{"id":"ea0aa94b-969a-4cd5-912a-5db61d502268","startTime":"1970-01-01T00:00:00Z","endTime":"2018-01-25T22:42:57.345Z","text":"mytext","type":"mytype","links":{"self":"/chronograf/v1/sources/1/annotations/ea0aa94b-969a-4cd5-912a-5db61d502268"}}]}
`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Inject the URL parameter the router would normally provide.
			tt.r = tt.r.WithContext(context.WithValue(
				context.TODO(),
				httprouter.ParamsKey,
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.ID,
					},
				}))
			s := &Service{
				Store:            tt.fields.Store,
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           mocks.NewLogger(),
			}
			s.Annotations(tt.w, tt.r)
			got := tt.w.Body.String()
			if got != tt.want {
				t.Errorf("Annotations() got != want:\n%s\n%s", got, tt.want)
			}
		})
	}
}

View File

@ -1,58 +0,0 @@
package server
import (
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/dist"
)
const (
	// Dir is the prefix of the production UI assets compiled into bindata.
	Dir = "../ui/build"
	// Default is the fallback asset served when a requested path is not found (the SPA entry point).
	Default = "../ui/build/index.html"
	// DebugDir is the prefix of the assets served from disk in development mode.
	DebugDir = "ui/build"
	// DebugDefault is the development-mode fallback asset for unknown paths.
	DebugDefault = "ui/build/index.html"
	// DefaultContentType is the Content-Type header value used when serving the Default file.
	DefaultContentType = "text/html; charset=utf-8"
)
// AssetsOpts configures the asset middleware.
type AssetsOpts struct {
	// Develop when true serves assets from ui/build directory directly;
	// false will use the bindata assets compiled into the binary.
	Develop bool
	// Logger, when non-nil, will log every asset request served.
	Logger chronograf.Logger
}
// Assets creates a middleware that serves a single page app, choosing
// between on-disk assets (development) and compiled-in bindata (production).
func Assets(opts AssetsOpts) http.Handler {
	var assets chronograf.Assets
	if opts.Develop {
		assets = &dist.DebugAssets{
			Dir:     DebugDir,
			Default: DebugDefault,
		}
	} else {
		assets = &dist.BindataAssets{
			Prefix:             Dir,
			Default:            Default,
			DefaultContentType: DefaultContentType,
		}
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Log each request when a logger was supplied.
		if opts.Logger != nil {
			opts.Logger.
				WithField("component", "server").
				WithField("remote_addr", r.RemoteAddr).
				WithField("method", r.Method).
				WithField("url", r.URL).
				Info("Serving assets")
		}
		assets.Handler().ServeHTTP(w, r)
	})
}

View File

@ -1,256 +0,0 @@
package server
import (
"context"
"fmt"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// HasAuthorizedToken extracts the token from a request and validates it using
// the authenticator. It is used by routes that need access to the token to
// populate links request.
func HasAuthorizedToken(auth oauth2.Authenticator, r *http.Request) (oauth2.Principal, error) {
	return auth.Validate(r.Context(), r)
}
// AuthorizedToken extracts the token and validates; if valid the next handler
// will be run with the principal placed on the request's context. It is up to
// the next handler to determine if the principal has access.
// On failure, will return http.StatusForbidden.
func AuthorizedToken(auth oauth2.Authenticator, logger chronograf.Logger, next http.Handler) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		reqLog := logger.
			WithField("component", "token_auth").
			WithField("remote_addr", r.RemoteAddr).
			WithField("method", r.Method).
			WithField("url", r.URL)

		ctx := r.Context()
		// Authorization of the principal is not checked here; handlers served
		// further down the chain are expected to do so.
		principal, err := auth.Validate(ctx, r)
		if err != nil {
			reqLog.Error("Invalid principal")
			w.WriteHeader(http.StatusForbidden)
			return
		}

		// A valid principal has its lifespan extended into the future.
		principal, err = auth.Extend(ctx, w, principal)
		if err != nil {
			reqLog.Error("Unable to extend principal")
			w.WriteHeader(http.StatusForbidden)
			return
		}

		// Hand the principal to the next handler via the request context.
		ctx = context.WithValue(ctx, oauth2.PrincipalKey, principal)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// RawStoreAccess gives a super admin access to the data store without a facade.
func RawStoreAccess(logger chronograf.Logger, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// Server-initiated requests already have raw access.
		if hasServerContext(ctx) {
			next(w, r)
			return
		}

		if !hasSuperAdminContext(ctx) {
			logger.
				WithField("component", "raw_store").
				WithField("remote_addr", r.RemoteAddr).
				WithField("method", r.Method).
				WithField("url", r.URL).
				Error("User making request is not a SuperAdmin")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}

		// Super admins proceed with a server context, bypassing org facades.
		next(w, r.WithContext(serverContext(ctx)))
	}
}
// AuthorizedUser extracts the user name and provider from context. If the
// user and provider can be found on the context, we look up the user by their
// name and provider. If the user is found, we verify that the user has at
// least the role supplied.
func AuthorizedUser(
	store DataStore,
	useAuth bool,
	role string,
	logger chronograf.Logger,
	next http.HandlerFunc,
) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		serverCtx := serverContext(ctx)
		log := logger.
			WithField("component", "role_auth").
			WithField("remote_addr", r.RemoteAddr).
			WithField("method", r.Method).
			WithField("url", r.URL)
		defaultOrg, err := store.Organizations(serverCtx).DefaultOrganization(serverCtx)
		if err != nil {
			log.Error(fmt.Sprintf("Failed to retrieve the default organization: %v", err))
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		if !useAuth {
			// If there is no auth, then set the organization id to be the default org id on context
			// so that calls like hasOrganizationContext as used in Organization Config service
			// method OrganizationConfig can successfully get the organization id
			ctx = context.WithValue(ctx, organizations.ContextKey, defaultOrg.ID)
			// And if there is no auth, then give the user raw access to the DataStore
			r = r.WithContext(serverContext(ctx))
			next(w, r)
			return
		}
		p, err := getValidPrincipal(ctx)
		if err != nil {
			log.Error("Failed to retrieve principal from context")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		scheme, err := getScheme(ctx)
		if err != nil {
			log.Error("Failed to retrieve scheme from context")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		// This is as if the user was logged into the default organization
		if p.Organization == "" {
			p.Organization = defaultOrg.ID
		}
		// validate that the organization exists
		_, err = store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization})
		if err != nil {
			log.Error(fmt.Sprintf("Failed to retrieve organization %s from organizations store", p.Organization))
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		ctx = context.WithValue(ctx, organizations.ContextKey, p.Organization)
		// TODO: seems silly to look up a user twice
		u, err := store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{
			Name:     &p.Subject,
			Provider: &p.Issuer,
			Scheme:   &scheme,
		})
		if err != nil {
			log.Error("Failed to retrieve user")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		// In particular this is used by server/users.go so that we know when and when not to
		// allow users to make someone a super admin
		ctx = context.WithValue(ctx, UserContextKey, u)
		if u.SuperAdmin {
			// To access resources (servers, sources, databases, layouts) within a DataStore,
			// an organization and a role are required even if you are a super admin or are
			// not using auth. Every user's current organization is set on context to filter
			// the resources accessed within a DataStore, including for super admin or when
			// not using auth. In this way, a DataStore can treat all requests the same,
			// including those from a super admin and when not using auth.
			//
			// As for roles, in the case of super admin or when not using auth, the user's
			// role on context (though not on their JWT or user) is set to be admin. In order
			// to access all resources belonging to their current organization.
			ctx = context.WithValue(ctx, roles.ContextKey, roles.AdminRoleName)
			r = r.WithContext(ctx)
			next(w, r)
			return
		}
		u, err = store.Users(ctx).Get(ctx, chronograf.UserQuery{
			Name:     &p.Subject,
			Provider: &p.Issuer,
			Scheme:   &scheme,
		})
		if err != nil {
			log.Error("Failed to retrieve user")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		if hasAuthorizedRole(u, role) {
			if len(u.Roles) != 1 {
				msg := `User %d has too many role in organization. User: %#v.Please report this log at https://github.com/influxdata/influxdb/chronograf/issues/new"`
				// BUG FIX: the message contains %d/%#v format verbs, but the
				// original code passed it to fmt.Sprint, which does not
				// interpret verbs and simply concatenated the operands.
				// fmt.Sprintf formats the log line as intended.
				log.Error(fmt.Sprintf(msg, u.ID, u))
				unknownErrorWithMessage(w, fmt.Errorf("please have administrator check logs and report error"), logger)
				return
			}
			// use the first role, since there should only ever be one
			// for any particular organization and hasAuthorizedRole
			// should ensure that at least one role for the org exists
			ctx = context.WithValue(ctx, roles.ContextKey, u.Roles[0].Name)
			r = r.WithContext(ctx)
			next(w, r)
			return
		}
		Error(w, http.StatusForbidden, "User is not authorized", logger)
	})
}
// hasAuthorizedRole reports whether the user holds at least the requested
// role. Roles are ordered member < viewer < editor < admin; a request for
// SuperAdminStatus (or any unknown role) is never satisfied here, since super
// admins are authorized earlier in the middleware chain.
func hasAuthorizedRole(u *chronograf.User, role string) bool {
	if u == nil {
		return false
	}

	// Roles that satisfy the requested minimum role.
	var accepted []string
	switch role {
	case roles.MemberRoleName:
		accepted = []string{roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName}
	case roles.ViewerRoleName:
		accepted = []string{roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName}
	case roles.EditorRoleName:
		accepted = []string{roles.EditorRoleName, roles.AdminRoleName}
	case roles.AdminRoleName:
		accepted = []string{roles.AdminRoleName}
	default:
		// Includes roles.SuperAdminStatus: SuperAdmins should have been
		// authorized before this; this only restricts non-superadmins.
		return false
	}

	for _, userRole := range u.Roles {
		for _, name := range accepted {
			if userRole.Name == name {
				return true
			}
		}
	}
	return false
}

File diff suppressed because it is too large Load Diff

View File

@ -1,186 +0,0 @@
package server
import (
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/canned"
"github.com/influxdata/influxdb/v2/chronograf/filestore"
"github.com/influxdata/influxdb/v2/chronograf/memdb"
"github.com/influxdata/influxdb/v2/chronograf/multistore"
)
// LayoutBuilder is responsible for building Layouts.
type LayoutBuilder interface {
	Build(chronograf.LayoutsStore) (*multistore.Layouts, error)
}

// MultiLayoutBuilder implements LayoutBuilder and will return a Layouts
// facade layered over several backing stores.
type MultiLayoutBuilder struct {
	Logger     chronograf.Logger
	UUID       chronograf.ID
	CannedPath string
}

// Build will construct a Layouts store of canned, filesystem, and db-backed
// personalized layouts.
func (builder *MultiLayoutBuilder) Build(db chronograf.LayoutsStore) (*multistore.Layouts, error) {
	// Layouts handled from a directory on disk.
	dirLayouts := filestore.NewApps(builder.CannedPath, builder.UUID, builder.Logger)
	// Layouts statically compiled into chronograf.
	binLayouts := &canned.BinLayoutsStore{
		Logger: builder.Logger,
	}
	// The stores form a hierarchy in which each is tried sequentially until
	// the operation has success, so the database is preferred over the
	// filesystem, which is preferred over binary data.
	return &multistore.Layouts{
		Stores: []chronograf.LayoutsStore{db, dirLayouts, binLayouts},
	}, nil
}
// DashboardBuilder is responsible for building dashboards.
type DashboardBuilder interface {
	Build(chronograf.DashboardsStore) (*multistore.DashboardsStore, error)
}

// MultiDashboardBuilder builds a DashboardsStore backed by bolt and the filesystem.
type MultiDashboardBuilder struct {
	Logger chronograf.Logger
	ID     chronograf.ID
	Path   string
}

// Build will construct a Dashboard store of filesystem and db-backed dashboards.
func (builder *MultiDashboardBuilder) Build(db chronograf.DashboardsStore) (*multistore.DashboardsStore, error) {
	// Dashboards handled from a directory on disk.
	fileDashboards := filestore.NewDashboards(builder.Path, builder.ID, builder.Logger)
	// The stores form a hierarchy in which each is tried sequentially until
	// the operation has success, so the database is preferred over the
	// filesystem.
	return &multistore.DashboardsStore{
		Stores: []chronograf.DashboardsStore{db, fileDashboards},
	}, nil
}
// SourcesBuilder builds a MultiSourceStore.
type SourcesBuilder interface {
	Build(chronograf.SourcesStore) (*multistore.SourcesStore, error)
}

// MultiSourceBuilder implements SourcesBuilder.
type MultiSourceBuilder struct {
	InfluxDBURL      string
	InfluxDBUsername string
	InfluxDBPassword string

	Logger chronograf.Logger
	ID     chronograf.ID
	Path   string
}

// Build will return a MultiSourceStore layered over an optional
// configuration-supplied InfluxDB source, the database, and the filesystem.
func (fs *MultiSourceBuilder) Build(db chronograf.SourcesStore) (*multistore.SourcesStore, error) {
	// Sources handled from a directory on disk.
	fileSources := filestore.NewSources(fs.Path, fs.ID, fs.Logger)
	stores := []chronograf.SourcesStore{db, fileSources}

	if fs.InfluxDBURL != "" {
		// A source supplied via configuration is placed first and marked as
		// the default.
		influxStore := &memdb.SourcesStore{
			Source: &chronograf.Source{
				ID:       0,
				Name:     fs.InfluxDBURL,
				Type:     chronograf.InfluxDB,
				Username: fs.InfluxDBUsername,
				Password: fs.InfluxDBPassword,
				URL:      fs.InfluxDBURL,
				Default:  true,
			},
		}
		stores = append([]chronograf.SourcesStore{influxStore}, stores...)
	}
	return &multistore.SourcesStore{Stores: stores}, nil
}
// KapacitorBuilder builds a KapacitorStore.
type KapacitorBuilder interface {
	Build(chronograf.ServersStore) (*multistore.KapacitorStore, error)
}

// MultiKapacitorBuilder implements KapacitorBuilder.
type MultiKapacitorBuilder struct {
	KapacitorURL      string
	KapacitorUsername string
	KapacitorPassword string

	Logger chronograf.Logger
	ID     chronograf.ID
	Path   string
}

// Build will return a multistore facade KapacitorStore over memdb, bolt, and
// the filesystem.
func (builder *MultiKapacitorBuilder) Build(db chronograf.ServersStore) (*multistore.KapacitorStore, error) {
	// Kapacitors handled from a directory on disk.
	fileKapacitors := filestore.NewKapacitors(builder.Path, builder.ID, builder.Logger)
	stores := []chronograf.ServersStore{db, fileKapacitors}

	if builder.KapacitorURL != "" {
		// A kapacitor supplied via configuration is placed first.
		memStore := &memdb.KapacitorStore{
			Kapacitor: &chronograf.Server{
				ID:       0,
				SrcID:    0,
				Name:     builder.KapacitorURL,
				URL:      builder.KapacitorURL,
				Username: builder.KapacitorUsername,
				Password: builder.KapacitorPassword,
			},
		}
		stores = append([]chronograf.ServersStore{memStore}, stores...)
	}
	return &multistore.KapacitorStore{Stores: stores}, nil
}
// OrganizationBuilder is responsible for building an OrganizationsStore.
type OrganizationBuilder interface {
	Build(chronograf.OrganizationsStore) (*multistore.OrganizationsStore, error)
}

// MultiOrganizationBuilder builds an OrganizationsStore backed by bolt and the filesystem.
type MultiOrganizationBuilder struct {
	Logger chronograf.Logger
	Path   string
}

// Build will construct an Organization store of filesystem and db-backed
// organizations.
func (builder *MultiOrganizationBuilder) Build(db chronograf.OrganizationsStore) (*multistore.OrganizationsStore, error) {
	// Organizations handled from a directory on disk.
	fileOrgs := filestore.NewOrganizations(builder.Path, builder.Logger)
	// The stores form a hierarchy in which each is tried sequentially until
	// the operation has success, so the database is preferred over the
	// filesystem.
	return &multistore.OrganizationsStore{
		Stores: []chronograf.OrganizationsStore{db, fileOrgs},
	}, nil
}

View File

@ -1,30 +0,0 @@
package server_test
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf/server"
)
// TestLayoutBuilder verifies that a zero-value MultiLayoutBuilder can build a
// layouts store even without a database-backed store.
func TestLayoutBuilder(t *testing.T) {
	var builder server.LayoutBuilder = &server.MultiLayoutBuilder{}
	layouts, err := builder.Build(nil)
	if err != nil {
		t.Fatalf("MultiLayoutBuilder can't build a MultiLayoutsStore: %v", err)
	}
	if layouts == nil {
		t.Fatal("LayoutBuilder should have built a layout")
	}
}
// TestSourcesStoresBuilder verifies that a zero-value MultiSourceBuilder can
// build a sources store even without a database-backed store.
func TestSourcesStoresBuilder(t *testing.T) {
	var builder server.SourcesBuilder = &server.MultiSourceBuilder{}
	sources, err := builder.Build(nil)
	if err != nil {
		t.Fatalf("MultiSourceBuilder can't build a MultiSourcesStore: %v", err)
	}
	if sources == nil {
		t.Fatal("SourcesBuilder should have built a MultiSourceStore")
	}
}

View File

@ -1,358 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
idgen "github.com/influxdata/influxdb/v2/chronograf/id"
)
const (
	// DefaultWidth is the cell width used if the request does not specify one.
	DefaultWidth = 4
	// DefaultHeight is the cell height used if the request does not specify one.
	DefaultHeight = 4
)
// dashboardCellLinks are the links attached to a cell response.
type dashboardCellLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}

// dashboardCellResponse is a dashboard cell decorated with its links.
type dashboardCellResponse struct {
	chronograf.DashboardCell
	Links dashboardCellLinks `json:"links"`
}
// newCellResponse builds the API representation of a single dashboard cell,
// normalizing nil collections and guaranteeing the x, y, and y2 axes are
// always present.
func newCellResponse(dID chronograf.DashboardID, cell chronograf.DashboardCell) dashboardCellResponse {
	const base = "/chronograf/v1/dashboards"
	if cell.Queries == nil {
		cell.Queries = []chronograf.DashboardQuery{}
	}
	if cell.CellColors == nil {
		cell.CellColors = []chronograf.CellColor{}
	}

	// Work on a copy of the axes map to handle a race condition with
	// concurrent readers of the original.
	axes := make(map[string]chronograf.Axis, len(cell.Axes))
	for label, axis := range cell.Axes {
		axes[label] = axis
	}
	// Ensure x, y, and y2 axes are always returned, defaulting to empty bounds.
	for _, label := range []string{"x", "y", "y2"} {
		if _, ok := axes[label]; !ok {
			axes[label] = chronograf.Axis{
				Bounds: []string{"", ""},
			}
		}
	}
	cell.Axes = axes

	return dashboardCellResponse{
		DashboardCell: cell,
		Links: dashboardCellLinks{
			Self: fmt.Sprintf("%s/%d/cells/%s", base, dID, cell.ID),
		},
	}
}
// newCellResponses converts every cell of a dashboard into its API representation.
func newCellResponses(dID chronograf.DashboardID, dcells []chronograf.DashboardCell) []dashboardCellResponse {
	out := make([]dashboardCellResponse, 0, len(dcells))
	for _, cell := range dcells {
		out = append(out, newCellResponse(dID, cell))
	}
	return out
}
// ValidDashboardCellRequest verifies that the dashboard cell has valid
// queries, axes, and colors, normalizing its width/height and time shifts
// along the way.
func ValidDashboardCellRequest(c *chronograf.DashboardCell) error {
	if c == nil {
		return fmt.Errorf("chronograf dashboard cell was nil")
	}

	CorrectWidthHeight(c)
	for _, q := range c.Queries {
		if err := ValidateQueryConfig(&q.QueryConfig); err != nil {
			return err
		}
	}
	MoveTimeShift(c)

	if err := HasCorrectAxes(c); err != nil {
		return err
	}
	return HasCorrectColors(c)
}
// HasCorrectAxes verifies that only permitted axes exist within a
// DashboardCell and that each axis uses a supported scale and base.
func HasCorrectAxes(c *chronograf.DashboardCell) error {
	for label, axis := range c.Axes {
		switch {
		case !oneOf(label, "x", "y", "y2"),
			!oneOf(axis.Scale, "linear", "log", ""),
			!oneOf(axis.Base, "10", "2", ""):
			return chronograf.ErrInvalidAxis
		}
	}
	return nil
}
// HasCorrectColors verifies that each cell color has a known type and a
// seven-character hex value (e.g. "#FFFFFF").
func HasCorrectColors(c *chronograf.DashboardCell) error {
	validTypes := []string{"max", "min", "threshold", "text", "background", "scale"}
	for _, color := range c.CellColors {
		if !oneOf(color.Type, validTypes...) {
			return chronograf.ErrInvalidColorType
		}
		if len(color.Hex) != 7 {
			return chronograf.ErrInvalidColor
		}
	}
	return nil
}
// oneOf reports whether prop equals any member of the variadic list of
// valid options.
func oneOf(prop string, validOpts ...string) bool {
	for _, candidate := range validOpts {
		if candidate == prop {
			return true
		}
	}
	return false
}
// CorrectWidthHeight raises a cell's width and height to the defaults when
// they are below the minimum of one unit.
func CorrectWidthHeight(c *chronograf.DashboardCell) {
	if c.W <= 0 {
		c.W = DefaultWidth
	}
	if c.H <= 0 {
		c.H = DefaultHeight
	}
}
// MoveTimeShift copies TimeShift from each QueryConfig onto its DashboardQuery.
func MoveTimeShift(c *chronograf.DashboardCell) {
	for i := range c.Queries {
		c.Queries[i].Shifts = c.Queries[i].QueryConfig.Shifts
	}
}
// AddQueryConfig updates a cell by converting InfluxQL into query configs.
// If the InfluxQL cannot be represented by a full query config, then the
// query config's raw text is set to the command.
func AddQueryConfig(c *chronograf.DashboardCell) {
	for i := range c.Queries {
		query := c.Queries[i]
		qc := ToQueryConfig(query.Command)
		// Shifts live on the query config; clear them from the query itself.
		qc.Shifts = append([]chronograf.TimeShift(nil), query.Shifts...)
		query.Shifts = nil
		query.QueryConfig = qc
		c.Queries[i] = query
	}
}
// DashboardCells returns all cells from a dashboard within the store.
func (s *Service) DashboardCells(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newDashboardResponse(dash).Cells, s.Logger)
}
// NewDashboardCell adds a cell to an existing dashboard.
func (s *Service) NewDashboardCell(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	var cell chronograf.DashboardCell
	if err := json.NewDecoder(r.Body).Decode(&cell); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := ValidDashboardCellRequest(&cell); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	// Assign the new cell a server-generated UUID.
	ids := &idgen.UUID{}
	cid, err := ids.Generate()
	if err != nil {
		msg := fmt.Sprintf("Error creating cell ID of dashboard %d: %v", id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	cell.ID = cid

	dash.Cells = append(dash.Cells, cell)
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error adding cell %s to dashboard %d: %v", cid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}

	// Respond with the normalized representation of the cell just added.
	boards := newDashboardResponse(dash)
	for _, c := range boards.Cells {
		if c.ID == cid {
			encodeJSON(w, http.StatusOK, c, s.Logger)
			return
		}
	}
	// ROBUSTNESS FIX: the freshly appended cell should always be found above;
	// previously this fell through and silently returned an empty 200.
	unknownErrorWithMessage(w, fmt.Errorf("cell %s missing from dashboard %d after update", cid, id), s.Logger)
}
// DashboardCellID gets a specific cell from an existing dashboard.
func (s *Service) DashboardCellID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	// Find the requested cell in the normalized dashboard response.
	cid := httprouter.ParamsFromContext(ctx).ByName("cid")
	for _, cell := range newDashboardResponse(dash).Cells {
		if cell.ID == cid {
			encodeJSON(w, http.StatusOK, cell, s.Logger)
			return
		}
	}
	notFound(w, id, s.Logger)
}
// RemoveDashboardCell removes a specific cell from an existing dashboard.
func (s *Service) RemoveDashboardCell(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	cid := httprouter.ParamsFromContext(ctx).ByName("cid")
	cellid := -1
	for i, cell := range dash.Cells {
		if cell.ID == cid {
			cellid = i
			break
		}
	}
	if cellid == -1 {
		// CONSISTENCY FIX: report the missing cell id rather than the
		// dashboard id (the dashboard was found), matching ReplaceDashboardCell.
		notFound(w, cid, s.Logger)
		return
	}

	dash.Cells = append(dash.Cells[:cellid], dash.Cells[cellid+1:]...)
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error removing cell %s from dashboard %d: %v", cid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// ReplaceDashboardCell replaces a cell entirely within an existing dashboard.
func (s *Service) ReplaceDashboardCell(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	// Locate the cell to be replaced by its id.
	cid := httprouter.ParamsFromContext(ctx).ByName("cid")
	idx := -1
	for i := range dash.Cells {
		if dash.Cells[i].ID == cid {
			idx = i
			break
		}
	}
	if idx < 0 {
		notFound(w, cid, s.Logger)
		return
	}

	var cell chronograf.DashboardCell
	if err := json.NewDecoder(r.Body).Decode(&cell); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	// Axes submitted without bounds default to empty bounds.
	for label, axis := range cell.Axes {
		if len(axis.Bounds) == 0 {
			axis.Bounds = []string{"", ""}
			cell.Axes[label] = axis
		}
	}
	if err := ValidDashboardCellRequest(&cell); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	cell.ID = cid

	dash.Cells[idx] = cell
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error updating cell %s in dashboard %d: %v", cid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newCellResponse(dash.ID, cell), s.Logger)
}

View File

@ -1,889 +0,0 @@
package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// Test_Cells_CorrectAxis exercises HasCorrectAxes with valid and invalid
// axis names, scale values, and base values.
func Test_Cells_CorrectAxis(t *testing.T) {
	t.Parallel()
	// shouldFail marks cells whose axes HasCorrectAxes must reject.
	axisTests := []struct {
		name       string
		cell       *chronograf.DashboardCell
		shouldFail bool
	}{
		{
			name: "correct axes",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Bounds: []string{"0", "100"},
					},
					"y": {
						Bounds: []string{"0", "100"},
					},
					"y2": {
						Bounds: []string{"0", "100"},
					},
				},
			},
		},
		{
			// only the labels x, y, and y2 are permitted
			name: "invalid axes present",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"axis of evil": {
						Bounds: []string{"666", "666"},
					},
					"axis of awesome": {
						Bounds: []string{"1337", "31337"},
					},
				},
			},
			shouldFail: true,
		},
		{
			name: "linear scale value",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Scale:  "linear",
						Bounds: []string{"0", "100"},
					},
				},
			},
		},
		{
			name: "log scale value",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Scale:  "log",
						Bounds: []string{"0", "100"},
					},
				},
			},
		},
		{
			// scale must be "linear", "log", or empty
			name: "invalid scale value",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Scale:  "potatoes",
						Bounds: []string{"0", "100"},
					},
				},
			},
			shouldFail: true,
		},
		{
			name: "base 10 axis",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Base:   "10",
						Bounds: []string{"0", "100"},
					},
				},
			},
		},
		{
			name: "base 2 axis",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Base:   "2",
						Bounds: []string{"0", "100"},
					},
				},
			},
		},
		{
			// base must be "10", "2", or empty
			name: "invalid base",
			cell: &chronograf.DashboardCell{
				Axes: map[string]chronograf.Axis{
					"x": {
						Base:   "all your base are belong to us",
						Bounds: []string{"0", "100"},
					},
				},
			},
			shouldFail: true,
		},
	}
	for _, test := range axisTests {
		t.Run(test.name, func(tt *testing.T) {
			// an error is expected exactly when shouldFail is set
			if err := HasCorrectAxes(test.cell); err != nil && !test.shouldFail {
				t.Errorf("%q: Unexpected error: err: %s", test.name, err)
			} else if err == nil && test.shouldFail {
				t.Errorf("%q: Expected error and received none", test.name)
			}
		})
	}
}
// Test_Service_DashboardCells verifies the DashboardCells handler returns the
// cells of a dashboard and normalizes axes so x, y, and y2 are always present.
func Test_Service_DashboardCells(t *testing.T) {
	cellsTests := []struct {
		name         string
		reqURL       *url.URL
		ctxParams    map[string]string // httprouter params placed on the request context
		mockResponse []chronograf.DashboardCell // cells returned by the mocked store
		expected     []chronograf.DashboardCell
		expectedCode int
	}{
		{
			name: "happy path",
			reqURL: &url.URL{
				Path: "/chronograf/v1/dashboards/1/cells",
			},
			ctxParams: map[string]string{
				"id": "1",
			},
			mockResponse: []chronograf.DashboardCell{},
			expected:     []chronograf.DashboardCell{},
			expectedCode: http.StatusOK,
		},
		{
			name: "cell axes should always be \"x\", \"y\", and \"y2\"",
			reqURL: &url.URL{
				Path: "/chronograf/v1/dashboards/1/cells",
			},
			ctxParams: map[string]string{
				"id": "1",
			},
			mockResponse: []chronograf.DashboardCell{
				{
					ID:      "3899be5a-f6eb-4347-b949-de2f4fbea859",
					X:       0,
					Y:       0,
					W:       4,
					H:       4,
					Name:    "CPU",
					Type:    "bar",
					Queries: []chronograf.DashboardQuery{},
					Axes:    map[string]chronograf.Axis{},
				},
			},
			expected: []chronograf.DashboardCell{
				{
					ID:         "3899be5a-f6eb-4347-b949-de2f4fbea859",
					X:          0,
					Y:          0,
					W:          4,
					H:          4,
					Name:       "CPU",
					Type:       "bar",
					Queries:    []chronograf.DashboardQuery{},
					CellColors: []chronograf.CellColor{},
					Axes: map[string]chronograf.Axis{
						"x": {
							Bounds: []string{"", ""},
						},
						"y": {
							Bounds: []string{"", ""},
						},
						"y2": {
							Bounds: []string{"", ""},
						},
					},
				},
			},
			expectedCode: http.StatusOK,
		},
	}
	for _, test := range cellsTests {
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			// setup context with params
			params := httprouter.Params{}
			for k, v := range test.ctxParams {
				params = append(params, httprouter.Param{
					Key:   k,
					Value: v,
				})
			}
			ctx := context.WithValue(
				context.Background(),
				httprouter.ParamsKey,
				params,
			)
			// setup response recorder and request
			rr := httptest.NewRecorder()
			req := httptest.NewRequest("GET", test.reqURL.RequestURI(), strings.NewReader("")).WithContext(ctx)
			// setup mock DashboardCells store and logger
			tlog := &mocks.TestLogger{}
			svc := &Service{
				Store: &mocks.Store{
					DashboardsStore: &mocks.DashboardsStore{
						GetF: func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) {
							return chronograf.Dashboard{
								ID:        chronograf.DashboardID(1),
								Cells:     test.mockResponse,
								Templates: []chronograf.Template{},
								Name:      "empty dashboard",
							}, nil
						},
					},
				},
				Logger: tlog,
			}
			// invoke DashboardCell handler
			svc.DashboardCells(rr, req)
			// setup frame to decode response into
			respFrame := []struct {
				chronograf.DashboardCell
				Links json.RawMessage `json:"links"` // ignore links
			}{}
			// decode response
			resp := rr.Result()
			if resp.StatusCode != test.expectedCode {
				tlog.Dump(t)
				t.Fatalf("%q - Status codes do not match. Want %d (%s), Got %d (%s)", test.name, test.expectedCode, http.StatusText(test.expectedCode), resp.StatusCode, http.StatusText(resp.StatusCode))
			}
			if err := json.NewDecoder(resp.Body).Decode(&respFrame); err != nil {
				t.Fatalf("%q - Error unmarshalling response body: err: %s", test.name, err)
			}
			// extract actual
			actual := []chronograf.DashboardCell{}
			for _, rsp := range respFrame {
				actual = append(actual, rsp.DashboardCell)
			}
			// compare actual and expected
			if !cmp.Equal(actual, test.expected) {
				t.Fatalf("%q - Dashboard Cells do not match: diff: %s", test.name, cmp.Diff(actual, test.expected))
			}
		})
	}
}
// TestHasCorrectColors verifies color-type and hex-length validation of
// dashboard cell colors.
func TestHasCorrectColors(t *testing.T) {
	tests := []struct {
		name    string
		c       *chronograf.DashboardCell
		wantErr bool
	}{
		{
			name: "min type is valid",
			c: &chronograf.DashboardCell{
				CellColors: []chronograf.CellColor{
					{
						Type: "min",
						Hex:  "#FFFFFF",
					},
				},
			},
		},
		{
			name: "max type is valid",
			c: &chronograf.DashboardCell{
				CellColors: []chronograf.CellColor{
					{
						Type: "max",
						Hex:  "#FFFFFF",
					},
				},
			},
		},
		{
			name: "threshold type is valid",
			c: &chronograf.DashboardCell{
				CellColors: []chronograf.CellColor{
					{
						Type: "threshold",
						Hex:  "#FFFFFF",
					},
				},
			},
		},
		{
			// "unknown" is not one of the permitted color types
			name: "invalid color type",
			c: &chronograf.DashboardCell{
				CellColors: []chronograf.CellColor{
					{
						Type: "unknown",
						Hex:  "#FFFFFF",
					},
				},
			},
			wantErr: true,
		},
		{
			// hex values must be exactly seven characters
			name: "invalid color hex",
			c: &chronograf.DashboardCell{
				CellColors: []chronograf.CellColor{
					{
						Type: "min",
						Hex:  "bad",
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := HasCorrectColors(tt.c); (err != nil) != tt.wantErr {
				t.Errorf("HasCorrectColors() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestService_ReplaceDashboardCell exercises the ReplaceDashboardCell HTTP
// handler against a mocked DashboardsStore, covering the happy path plus
// the not-found, invalid-payload, and store-failure error branches. The
// `want` strings are compared byte-for-byte against the response body.
func TestService_ReplaceDashboardCell(t *testing.T) {
	tests := []struct {
		name            string
		DashboardsStore chronograf.DashboardsStore
		ID              string // dashboard id path parameter
		CID             string // cell id path parameter
		w               *httptest.ResponseRecorder
		r               *http.Request
		want            string // exact expected response body
	}{
		{
			// Happy path: the stored cell's query config must survive the
			// round trip through the replace handler.
			name: "update cell retains query config",
			ID:   "1",
			CID:  "3c5c4102-fa40-4585-a8f9-917c77e37192",
			DashboardsStore: &mocks.DashboardsStore{
				UpdateF: func(ctx context.Context, target chronograf.Dashboard) error {
					return nil
				},
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{
						ID: ID,
						Cells: []chronograf.DashboardCell{
							{
								ID:   "3c5c4102-fa40-4585-a8f9-917c77e37192",
								W:    4,
								H:    4,
								Name: "Untitled Cell",
								Queries: []chronograf.DashboardQuery{
									{
										Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)",
										QueryConfig: chronograf.QueryConfig{
											ID:              "3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e",
											Database:        "telegraf",
											Measurement:     "cpu",
											RetentionPolicy: "autogen",
											Fields: []chronograf.Field{
												{
													Value: "mean",
													Type:  "func",
													Alias: "mean_usage_user",
													Args: []chronograf.Field{
														{
															Value: "usage_user",
															Type:  "field",
														},
													},
												},
											},
											Tags: map[string][]string{
												"cpu": {
													"ChristohersMBP2.lan",
												},
											},
											GroupBy: chronograf.GroupBy{
												Time: "2s",
												Tags: []string{},
											},
											AreTagsAccepted: true,
											Fill:            "null",
											RawText:         strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"),
											Range: &chronograf.DurationRange{
												Lower: "now() - 15m",
											},
											Shifts: []chronograf.TimeShift{},
										},
									},
								},
								Axes: map[string]chronograf.Axis{
									"x": {
										Bounds: []string{"", ""},
									},
									"y": {
										Bounds: []string{"", ""},
									},
									"y2": {
										Bounds: []string{"", ""},
									},
								},
								Type: "line",
								CellColors: []chronograf.CellColor{
									{
										ID:    "0",
										Type:  "min",
										Hex:   "#00C9FF",
										Name:  "laser",
										Value: "0",
									},
									{
										ID:    "1",
										Type:  "max",
										Hex:   "#9394FF",
										Name:  "comet",
										Value: "100",
									},
								},
							},
						},
					}, nil
				},
			},
			w: httptest.NewRecorder(),
			// Request body mirrors the stored cell; the handler should echo
			// it back (with server-computed links) rather than dropping the
			// queryConfig.
			r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`
{
"i": "3c5c4102-fa40-4585-a8f9-917c77e37192",
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"name": "Untitled Cell",
"queries": [
{
"queryConfig": {
"id": "3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e",
"database": "telegraf",
"measurement": "cpu",
"retentionPolicy": "autogen",
"fields": [
{
"value": "mean",
"type": "func",
"alias": "mean_usage_user",
"args": [{"value": "usage_user", "type": "field", "alias": ""}]
}
],
"tags": {"cpu": ["ChristohersMBP2.lan"]},
"groupBy": {"time": "2s", "tags": []},
"areTagsAccepted": true,
"fill": "null",
"rawText":
"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)",
"range": {"upper": "", "lower": "now() - 15m"},
"shifts": []
},
"query":
"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)",
"source": null
}
],
"axes": {
"x": {
"bounds": ["",""],
"label": "",
"prefix": "",
"suffix": "",
"base": "",
"scale": ""
},
"y": {
"bounds": ["",""],
"label": "",
"prefix": "",
"suffix": "",
"base": "",
"scale": ""
},
"y2": {
"bounds": ["",""],
"label": "",
"prefix": "",
"suffix": "",
"base": "",
"scale": ""
}
},
"type": "line",
"colors": [
{"type": "min", "hex": "#00C9FF", "id": "0", "name": "laser", "value": "0"},
{
"type": "max",
"hex": "#9394FF",
"id": "1",
"name": "comet",
"value": "100"
}
],
"links": {
"self":
"/chronograf/v1/dashboards/6/cells/3c5c4102-fa40-4585-a8f9-917c77e37192"
}
}
`))),
			want: `{"i":"3c5c4102-fa40-4585-a8f9-917c77e37192","x":0,"y":0,"w":4,"h":4,"name":"Untitled Cell","queries":[{"query":"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)","queryConfig":{"id":"3cd3eaa4-a4b8-44b3-b69e-0c7bf6b91d9e","database":"telegraf","measurement":"cpu","retentionPolicy":"autogen","fields":[{"value":"mean","type":"func","alias":"mean_usage_user","args":[{"value":"usage_user","type":"field","alias":""}]}],"tags":{"cpu":["ChristohersMBP2.lan"]},"groupBy":{"time":"2s","tags":[]},"areTagsAccepted":true,"fill":"null","rawText":"SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)","range":{"upper":"","lower":"now() - 15m"},"shifts":[]},"source":""}],"axes":{"x":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""},"y":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""},"y2":{"bounds":["",""],"label":"","prefix":"","suffix":"","base":"","scale":""}},"type":"line","colors":[{"id":"0","type":"min","hex":"#00C9FF","name":"laser","value":"0"},{"id":"1","type":"max","hex":"#9394FF","name":"comet","value":"100"}],"tableOptions":{"verticalTimeAxis":false,"sortBy":{"internalName":"","displayName":"","visible":false},"wrapping":"","fixFirstColumn":false},"fieldOptions":null,"timeFormat":"","decimalPlaces":{"isEnforced":false,"digits":0},"links":{"self":"/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192"}}
`,
		},
		{
			// Store Get failure maps to a 404 on the dashboard id.
			name: "dashboard doesn't exist",
			ID:   "1",
			DashboardsStore: &mocks.DashboardsStore{
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{}, fmt.Errorf("doesn't exist")
				},
			},
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil),
			want: `{"code":404,"message":"ID 1 not found"}`,
		},
		{
			// Dashboard exists but has no cells: 404 on the cell id.
			name: "cell doesn't exist",
			ID:   "1",
			CID:  "3c5c4102-fa40-4585-a8f9-917c77e37192",
			DashboardsStore: &mocks.DashboardsStore{
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{}, nil
				},
			},
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil),
			want: `{"code":404,"message":"ID 3c5c4102-fa40-4585-a8f9-917c77e37192 not found"}`,
		},
		{
			// A queryConfig field with an unknown type is rejected as 422.
			name: "invalid query config",
			ID:   "1",
			CID:  "3c5c4102-fa40-4585-a8f9-917c77e37192",
			DashboardsStore: &mocks.DashboardsStore{
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{
						ID: ID,
						Cells: []chronograf.DashboardCell{
							{
								ID: "3c5c4102-fa40-4585-a8f9-917c77e37192",
							},
						},
					}, nil
				},
			},
			w: httptest.NewRecorder(),
			r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", bytes.NewReader([]byte(`{
"i": "3c5c4102-fa40-4585-a8f9-917c77e37192",
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"name": "Untitled Cell",
"queries": [
{
"queryConfig": {
"fields": [
{
"value": "invalid",
"type": "invalidType"
}
]
}
}
]
}`))),
			want: `{"code":422,"message":"invalid field type \"invalidType\" ; expect func, field, integer, number, regex, wildcard"}`,
		},
		{
			// A nil body cannot be decoded: 400 unparsable JSON.
			name: "JSON is not parsable",
			ID:   "1",
			CID:  "3c5c4102-fa40-4585-a8f9-917c77e37192",
			DashboardsStore: &mocks.DashboardsStore{
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{
						ID: ID,
						Cells: []chronograf.DashboardCell{
							{
								ID: "3c5c4102-fa40-4585-a8f9-917c77e37192",
							},
						},
					}, nil
				},
			},
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", nil),
			want: `{"code":400,"message":"unparsable JSON"}`,
		},
		{
			// Update failure in the store surfaces as a 500 with context.
			name: "not able to update store returns error message",
			ID:   "1",
			CID:  "3c5c4102-fa40-4585-a8f9-917c77e37192",
			DashboardsStore: &mocks.DashboardsStore{
				UpdateF: func(ctx context.Context, target chronograf.Dashboard) error {
					return fmt.Errorf("error")
				},
				GetF: func(ctx context.Context, ID chronograf.DashboardID) (chronograf.Dashboard, error) {
					return chronograf.Dashboard{
						ID: ID,
						Cells: []chronograf.DashboardCell{
							{
								ID: "3c5c4102-fa40-4585-a8f9-917c77e37192",
							},
						},
					}, nil
				},
			},
			w: httptest.NewRecorder(),
			r: httptest.NewRequest("PUT", "/chronograf/v1/dashboards/1/cells/3c5c4102-fa40-4585-a8f9-917c77e37192", bytes.NewReader([]byte(`{
"i": "3c5c4102-fa40-4585-a8f9-917c77e37192",
"x": 0,
"y": 0,
"w": 4,
"h": 4,
"name": "Untitled Cell",
"queries": [
{
"queryConfig": {
"fields": [
{
"value": "usage_user",
"type": "field"
}
]
}
}
]
}`))),
			want: `{"code":500,"message":"Error updating cell 3c5c4102-fa40-4585-a8f9-917c77e37192 in dashboard 1: error"}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					DashboardsStore: tt.DashboardsStore,
				},
				Logger: &mocks.TestLogger{},
			}
			// Path parameters are injected twice: once through the service's
			// own context helper and once through httprouter's params key.
			tt.r = WithContext(tt.r.Context(), tt.r, map[string]string{
				"id":  tt.ID,
				"cid": tt.CID,
			})
			tt.r = tt.r.WithContext(context.WithValue(
				context.TODO(),
				httprouter.ParamsKey,
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.ID,
					},
					{
						Key:   "cid",
						Value: tt.CID,
					},
				}))
			s.ReplaceDashboardCell(tt.w, tt.r)
			got := tt.w.Body.String()
			if got != tt.want {
				t.Errorf("ReplaceDashboardCell() = got/want\n%s\n%s\n", got, tt.want)
			}
		})
	}
}
// strPtr returns a pointer to a copy of s, convenient for populating
// optional *string fields in test fixtures.
func strPtr(s string) *string {
	v := s
	return &v
}
// Test_newCellResponses verifies that newCellResponses wraps dashboard
// cells with self links and fills in default axes/queries/colors when the
// input cell leaves them unset.
func Test_newCellResponses(t *testing.T) {
	tests := []struct {
		name   string
		dID    chronograf.DashboardID
		dcells []chronograf.DashboardCell
		want   []dashboardCellResponse
	}{
		{
			// Fully-populated cell: fields pass through unchanged and only
			// the links block is added.
			name: "all fields set",
			dID:  chronograf.DashboardID(1),
			dcells: []chronograf.DashboardCell{
				{
					ID:   "445f8dc0-4d73-4168-8477-f628690d18a3",
					X:    0,
					Y:    0,
					W:    4,
					H:    4,
					Name: "Untitled Cell",
					Queries: []chronograf.DashboardQuery{
						{
							Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)",
							Label:   "",
							QueryConfig: chronograf.QueryConfig{
								ID:              "8d5ec6da-13a5-423e-9026-7bc45649766c",
								Database:        "telegraf",
								Measurement:     "cpu",
								RetentionPolicy: "autogen",
								Fields: []chronograf.Field{
									{
										Value: "mean",
										Type:  "func",
										Alias: "mean_usage_user",
										Args: []chronograf.Field{
											{
												Value: "usage_user",
												Type:  "field",
												Alias: "",
											},
										},
									},
								},
								Tags: map[string][]string{"cpu": {"ChristohersMBP2.lan"}},
								GroupBy: chronograf.GroupBy{
									Time: "2s",
								},
								AreTagsAccepted: true,
								Fill:            "null",
								RawText:         strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"),
								Range: &chronograf.DurationRange{
									Lower: "now() - 15m",
								},
							},
							Source: "",
						},
					},
					Axes: map[string]chronograf.Axis{
						"x":  {},
						"y":  {},
						"y2": {},
					},
					Type: "line",
					CellColors: []chronograf.CellColor{
						{ID: "0", Type: "min", Hex: "#00C9FF", Name: "laser", Value: "0"},
						{ID: "1", Type: "max", Hex: "#9394FF", Name: "comet", Value: "100"},
					},
				},
			},
			want: []dashboardCellResponse{
				{
					DashboardCell: chronograf.DashboardCell{
						ID:   "445f8dc0-4d73-4168-8477-f628690d18a3",
						W:    4,
						H:    4,
						Name: "Untitled Cell",
						Queries: []chronograf.DashboardQuery{
							{
								Command: "SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)",
								QueryConfig: chronograf.QueryConfig{
									ID:              "8d5ec6da-13a5-423e-9026-7bc45649766c",
									Database:        "telegraf",
									Measurement:     "cpu",
									RetentionPolicy: "autogen",
									Fields: []chronograf.Field{
										{
											Value: "mean",
											Type:  "func",
											Alias: "mean_usage_user",
											Args: []chronograf.Field{
												{
													Value: "usage_user",
													Type:  "field",
												},
											},
										},
									},
									Tags: map[string][]string{"cpu": {"ChristohersMBP2.lan"}},
									GroupBy: chronograf.GroupBy{
										Time: "2s",
									},
									AreTagsAccepted: true,
									Fill:            "null",
									RawText:         strPtr("SELECT mean(\"usage_user\") AS \"mean_usage_user\" FROM \"telegraf\".\"autogen\".\"cpu\" WHERE time > :dashboardTime: AND \"cpu\"=:cpu: GROUP BY :interval: FILL(null)"),
									Range: &chronograf.DurationRange{
										Lower: "now() - 15m",
									},
								},
							},
						},
						Axes: map[string]chronograf.Axis{
							"x":  {},
							"y":  {},
							"y2": {},
						},
						Type: "line",
						CellColors: []chronograf.CellColor{
							{
								ID:    "0",
								Type:  "min",
								Hex:   "#00C9FF",
								Name:  "laser",
								Value: "0",
							},
							{
								ID:    "1",
								Type:  "max",
								Hex:   "#9394FF",
								Name:  "comet",
								Value: "100",
							},
						},
					},
					Links: dashboardCellLinks{
						Self: "/chronograf/v1/dashboards/1/cells/445f8dc0-4d73-4168-8477-f628690d18a3"},
				},
			},
		},
		{
			// Bare cell: empty queries/colors slices and bounded axes are
			// expected to be synthesized by newCellResponses.
			name: "nothing set",
			dID:  chronograf.DashboardID(1),
			dcells: []chronograf.DashboardCell{
				{
					ID:   "445f8dc0-4d73-4168-8477-f628690d18a3",
					X:    0,
					Y:    0,
					W:    4,
					H:    4,
					Name: "Untitled Cell",
				},
			},
			want: []dashboardCellResponse{
				{
					DashboardCell: chronograf.DashboardCell{
						ID:      "445f8dc0-4d73-4168-8477-f628690d18a3",
						W:       4,
						H:       4,
						Name:    "Untitled Cell",
						Queries: []chronograf.DashboardQuery{},
						Axes: map[string]chronograf.Axis{
							"x": {
								Bounds: []string{"", ""},
							},
							"y": {
								Bounds: []string{"", ""},
							},
							"y2": {
								Bounds: []string{"", ""},
							},
						},
						CellColors: []chronograf.CellColor{},
					},
					Links: dashboardCellLinks{
						Self: "/chronograf/v1/dashboards/1/cells/445f8dc0-4d73-4168-8477-f628690d18a3"},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := newCellResponses(tt.dID, tt.dcells); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("newCellResponses() = got-/want+ %s", cmp.Diff(got, tt.want))
			}
		})
	}
}

View File

@ -1,115 +0,0 @@
package server
import (
"encoding/json"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
)
// configLinks describes the navigation links returned with the global
// application configuration resource.
type configLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
	Auth string `json:"auth"` // Auth link to the auth config endpoint
}
// selfLinks holds the single self-referential link used by resources that
// expose no sub-resources of their own.
type selfLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// configResponse is the JSON body served by the global config endpoint:
// the stored configuration embedded alongside its links.
type configResponse struct {
	Links configLinks `json:"links"`
	chronograf.Config
}
// newConfigResponse wraps config with the fixed links for the global
// configuration endpoint.
func newConfigResponse(config chronograf.Config) *configResponse {
	res := configResponse{Config: config}
	res.Links.Self = "/chronograf/v1/config"
	res.Links.Auth = "/chronograf/v1/config/auth"
	return &res
}
// authConfigResponse is the JSON body served by the auth-config endpoint:
// the auth section of the configuration embedded alongside its self link.
type authConfigResponse struct {
	Links selfLinks `json:"links"`
	chronograf.AuthConfig
}
// newAuthConfigResponse extracts the auth section of config and pairs it
// with the auth-config endpoint's self link.
func newAuthConfigResponse(config chronograf.Config) *authConfigResponse {
	res := authConfigResponse{AuthConfig: config.Auth}
	res.Links.Self = "/chronograf/v1/config/auth"
	return &res
}
// Config retrieves the global application configuration
func (s *Service) Config(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	cfg, err := s.Store.Config(ctx).Get(ctx)
	switch {
	case err != nil:
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
	case cfg == nil:
		// A nil config with no error is a store contract violation.
		Error(w, http.StatusBadRequest, "Configuration object was nil", s.Logger)
	default:
		encodeJSON(w, http.StatusOK, newConfigResponse(*cfg), s.Logger)
	}
}
// AuthConfig retrieves the auth section of the global application configuration
func (s *Service) AuthConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	cfg, err := s.Store.Config(ctx).Get(ctx)
	switch {
	case err != nil:
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
	case cfg == nil:
		// A nil config with no error is a store contract violation.
		Error(w, http.StatusBadRequest, "Configuration object was nil", s.Logger)
	default:
		encodeJSON(w, http.StatusOK, newAuthConfigResponse(*cfg), s.Logger)
	}
}
// ReplaceAuthConfig replaces the auth section of the global application configuration
func (s *Service) ReplaceAuthConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Decode the replacement auth section before touching the store.
	var authConfig chronograf.AuthConfig
	if err := json.NewDecoder(r.Body).Decode(&authConfig); err != nil {
		invalidJSON(w, s.Logger)
		return
	}

	cfg, err := s.Store.Config(ctx).Get(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}
	if cfg == nil {
		Error(w, http.StatusBadRequest, "Configuration object was nil", s.Logger)
		return
	}

	// Swap in the new auth section and persist the whole config object.
	cfg.Auth = authConfig
	res := newAuthConfigResponse(*cfg)
	if err := s.Store.Config(ctx).Update(ctx, cfg); err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}

View File

@ -1,218 +0,0 @@
package server
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestConfig checks that the Config handler serves the stored global
// configuration with its links as JSON.
func TestConfig(t *testing.T) {
	tests := []struct {
		name        string
		store       chronograf.ConfigStore
		statusCode  int
		contentType string
		body        string
	}{
		{
			name: "Get global application configuration",
			store: &mocks.ConfigStore{
				Config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
			statusCode:  200,
			contentType: "application/json",
			body:        `{"links":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":{"superAdminNewUsers":false}}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			svc := &Service{
				Store:  &mocks.Store{ConfigStore: tt.store},
				Logger: &chronograf.NoopLogger{},
			}
			rec := httptest.NewRecorder()
			svc.Config(rec, httptest.NewRequest("GET", "http://any.url", nil))

			resp := rec.Result()
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.statusCode {
				t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.statusCode)
			}
			if content := resp.Header.Get("Content-Type"); tt.contentType != "" && content != tt.contentType {
				t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.contentType)
			}
			// JSON comparison is structural, not byte-for-byte.
			if eq, _ := jsonEqual(string(body), tt.body); tt.body != "" && !eq {
				t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.body)
			}
		})
	}
}
// TestAuthConfig checks that the AuthConfig handler serves only the auth
// section of the configuration, with its self link.
func TestAuthConfig(t *testing.T) {
	tests := []struct {
		name        string
		store       chronograf.ConfigStore
		statusCode  int
		contentType string
		body        string
	}{
		{
			name: "Get auth configuration",
			store: &mocks.ConfigStore{
				Config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
			statusCode:  200,
			contentType: "application/json",
			body:        `{"superAdminNewUsers": false, "links": {"self": "/chronograf/v1/config/auth"}}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			svc := &Service{
				Store:  &mocks.Store{ConfigStore: tt.store},
				Logger: &chronograf.NoopLogger{},
			}
			rec := httptest.NewRecorder()
			svc.AuthConfig(rec, httptest.NewRequest("GET", "http://any.url", nil))

			resp := rec.Result()
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.statusCode {
				t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.statusCode)
			}
			if content := resp.Header.Get("Content-Type"); tt.contentType != "" && content != tt.contentType {
				t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.contentType)
			}
			// JSON comparison is structural, not byte-for-byte.
			if eq, _ := jsonEqual(string(body), tt.body); tt.body != "" && !eq {
				t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.body)
			}
		})
	}
}
// TestReplaceAuthConfig checks that the ReplaceAuthConfig handler swaps in
// the posted auth section and echoes the updated resource back.
func TestReplaceAuthConfig(t *testing.T) {
	tests := []struct {
		name        string
		store       chronograf.ConfigStore
		payload     interface{} // JSON-serializable request body
		statusCode  int
		contentType string
		body        string
	}{
		{
			name: "Set auth configuration",
			store: &mocks.ConfigStore{
				Config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
			payload: chronograf.AuthConfig{
				SuperAdminNewUsers: true,
			},
			statusCode:  200,
			contentType: "application/json",
			body:        `{"superAdminNewUsers": true, "links": {"self": "/chronograf/v1/config/auth"}}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			svc := &Service{
				Store:  &mocks.Store{ConfigStore: tt.store},
				Logger: &chronograf.NoopLogger{},
			}
			rec := httptest.NewRecorder()
			req := httptest.NewRequest("GET", "http://any.url", nil)
			buf, _ := json.Marshal(tt.payload)
			req.Body = ioutil.NopCloser(bytes.NewReader(buf))
			svc.ReplaceAuthConfig(rec, req)

			resp := rec.Result()
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.statusCode {
				t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.statusCode)
			}
			if content := resp.Header.Get("Content-Type"); tt.contentType != "" && content != tt.contentType {
				t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.contentType)
			}
			// JSON comparison is structural, not byte-for-byte.
			if eq, _ := jsonEqual(string(body), tt.body); tt.body != "" && !eq {
				t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.body)
			}
		})
	}
}

View File

@ -1,30 +0,0 @@
package server
import (
"context"
)
type serverContextKey string
// ServerContextKey is the key used to specify that the
// server is making the requet via context
const ServerContextKey = serverContextKey("server")
// hasServerContext specifies if the context contains
// the ServerContextKey and that the value stored there is true
func hasServerContext(ctx context.Context) bool {
// prevents panic in case of nil context
if ctx == nil {
return false
}
sa, ok := ctx.Value(ServerContextKey).(bool)
// should never happen
if !ok {
return false
}
return sa
}
func serverContext(ctx context.Context) context.Context {
return context.WithValue(ctx, ServerContextKey, true)
}

View File

@ -1,287 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
)
// dashboardLinks describes the navigation links returned with a dashboard
// resource.
type dashboardLinks struct {
	Self      string `json:"self"`      // Self link mapping to this resource
	Cells     string `json:"cells"`     // Cells link to the cells endpoint
	Templates string `json:"templates"` // Templates link to the templates endpoint
}
// dashboardResponse is the JSON shape of a single dashboard, with cells and
// templates expanded into their own link-bearing responses.
type dashboardResponse struct {
	ID           chronograf.DashboardID  `json:"id"`
	Cells        []dashboardCellResponse `json:"cells"`
	Templates    []templateResponse      `json:"templates"`
	Name         string                  `json:"name"`
	Organization string                  `json:"organization"`
	Links        dashboardLinks          `json:"links"`
}

// getDashboardsResponse is the JSON envelope returned when listing all
// dashboards.
type getDashboardsResponse struct {
	Dashboards []*dashboardResponse `json:"dashboards"`
}
// newDashboardResponse normalizes d (cell size defaults, derived query
// configs) and wraps it with REST links for the dashboards API.
func newDashboardResponse(d chronograf.Dashboard) *dashboardResponse {
	const base = "/chronograf/v1/dashboards"
	dd := AddQueryConfigs(DashboardDefaults(d))
	return &dashboardResponse{
		ID:           dd.ID,
		Name:         dd.Name,
		Cells:        newCellResponses(dd.ID, dd.Cells),
		Templates:    newTemplateResponses(dd.ID, dd.Templates),
		Organization: d.Organization,
		Links: dashboardLinks{
			Self:      fmt.Sprintf("%s/%d", base, dd.ID),
			Cells:     fmt.Sprintf("%s/%d/cells", base, dd.ID),
			Templates: fmt.Sprintf("%s/%d/templates", base, dd.ID),
		},
	}
}
// Dashboards returns all dashboards within the store
func (s *Service) Dashboards(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	all, err := s.Store.Dashboards(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusInternalServerError, "Error loading dashboards", s.Logger)
		return
	}
	// Start from an empty (non-nil) slice so zero dashboards marshals as [].
	res := getDashboardsResponse{
		Dashboards: make([]*dashboardResponse, 0, len(all)),
	}
	for _, d := range all {
		res.Dashboards = append(res.Dashboards, newDashboardResponse(d))
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// DashboardID returns a single specified dashboard
func (s *Service) DashboardID(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, newDashboardResponse(dash), s.Logger)
}
// NewDashboard creates and returns a new dashboard object
//
// Fixes: the handler previously mixed `ctx` and a second `r.Context()` call
// for the same request, and declared a file-scope-style `var err error`
// alongside shadowed `:=` errors; both are now consistent.
func (s *Service) NewDashboard(w http.ResponseWriter, r *http.Request) {
	var dashboard chronograf.Dashboard
	if err := json.NewDecoder(r.Body).Decode(&dashboard); err != nil {
		invalidJSON(w, s.Logger)
		return
	}

	ctx := r.Context()
	defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}

	// Validation fills in the org and cell defaults before persisting.
	if err := ValidDashboardRequest(&dashboard, defaultOrg.ID); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	dashboard, err = s.Store.Dashboards(ctx).Add(ctx, dashboard)
	if err != nil {
		msg := fmt.Errorf("error storing dashboard %v: %v", dashboard, err)
		unknownErrorWithMessage(w, msg, s.Logger)
		return
	}

	res := newDashboardResponse(dashboard)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusCreated, res, s.Logger)
}
// RemoveDashboard deletes a dashboard
func (s *Service) RemoveDashboard(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	// Fetch first so a missing dashboard yields 404 rather than a silent
	// no-op delete.
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	if err := s.Store.Dashboards(ctx).Delete(ctx, dash); err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	// Success carries no body.
	w.WriteHeader(http.StatusNoContent)
}
// ReplaceDashboard completely replaces a dashboard
func (s *Service) ReplaceDashboard(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	idParam, err := paramID("id", r)
	if err != nil {
		msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		// BUG FIX: previously execution fell through here with id == 0
		// after writing the error; bail out like the sibling handlers do.
		return
	}
	id := chronograf.DashboardID(idParam)

	// Confirm the dashboard exists before accepting a replacement.
	_, err = s.Store.Dashboards(ctx).Get(ctx, id)
	if err != nil {
		Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), s.Logger)
		return
	}

	var req chronograf.Dashboard
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	// The path parameter wins over any id carried in the payload.
	req.ID = id

	defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	if err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	if err := s.Store.Dashboards(ctx).Update(ctx, req); err != nil {
		msg := fmt.Sprintf("Error updating dashboard ID %d: %v", id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}

	res := newDashboardResponse(req)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// UpdateDashboard completely updates either the dashboard name or the cells
func (s *Service) UpdateDashboard(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	idParam, err := paramID("id", r)
	if err != nil {
		msg := fmt.Sprintf("Could not parse dashboard ID: %s", err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	id := chronograf.DashboardID(idParam)
	// Load the stored dashboard; the patch is applied on top of it.
	orig, err := s.Store.Dashboards(ctx).Get(ctx, id)
	if err != nil {
		Error(w, http.StatusNotFound, fmt.Sprintf("ID %d not found", id), s.Logger)
		return
	}
	var req chronograf.Dashboard
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	req.ID = id
	// Exactly one of name or cells is applied per request; a non-empty name
	// takes precedence and any cells in the same payload are ignored.
	if req.Name != "" {
		orig.Name = req.Name
	} else if len(req.Cells) > 0 {
		defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)
		if err != nil {
			unknownErrorWithMessage(w, err, s.Logger)
			return
		}
		// Validation fills in org/cell defaults before the cells replace
		// the stored ones wholesale.
		if err := ValidDashboardRequest(&req, defaultOrg.ID); err != nil {
			invalidData(w, err, s.Logger)
			return
		}
		orig.Cells = req.Cells
	} else {
		invalidData(w, fmt.Errorf("update must include either name or cells"), s.Logger)
		return
	}
	if err := s.Store.Dashboards(ctx).Update(ctx, orig); err != nil {
		msg := fmt.Sprintf("Error updating dashboard ID %d: %v", id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	res := newDashboardResponse(orig)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// ValidDashboardRequest verifies that the dashboard cells have a query
func ValidDashboardRequest(d *chronograf.Dashboard, defaultOrgID string) error {
	// An unset organization falls back to the default org.
	if d.Organization == "" {
		d.Organization = defaultOrgID
	}
	for i := range d.Cells {
		cell := d.Cells[i]
		if err := ValidDashboardCellRequest(&cell); err != nil {
			return err
		}
		// Cell validation may normalize the cell, so write it back.
		d.Cells[i] = cell
	}
	// NOTE(review): templates are validated on a copy — any mutation made by
	// ValidTemplateRequest is discarded, unlike cells above. Confirm that is
	// intentional.
	for _, t := range d.Templates {
		if err := ValidTemplateRequest(&t); err != nil {
			return err
		}
	}
	*d = DashboardDefaults(*d)
	return nil
}
// DashboardDefaults updates the dashboard with the default values
// if none are specified
func DashboardDefaults(d chronograf.Dashboard) chronograf.Dashboard {
	newDash := chronograf.Dashboard{
		ID:           d.ID,
		Templates:    d.Templates,
		Name:         d.Name,
		Organization: d.Organization,
		Cells:        make([]chronograf.DashboardCell, len(d.Cells)),
	}
	// Normalize each cell's width/height into the fresh slice.
	for i := range d.Cells {
		c := d.Cells[i]
		CorrectWidthHeight(&c)
		newDash.Cells[i] = c
	}
	return newDash
}
// AddQueryConfigs updates all the cells in the dashboard to have query config
// objects corresponding to their influxql queries.
func AddQueryConfigs(d chronograf.Dashboard) (newDash chronograf.Dashboard) {
	newDash.ID = d.ID
	newDash.Templates = d.Templates
	newDash.Name = d.Name
	// Carry the organization through as well: it was previously dropped
	// here, unlike in the sibling DashboardDefaults, forcing callers to
	// re-read it from the input dashboard.
	newDash.Organization = d.Organization
	newDash.Cells = make([]chronograf.DashboardCell, len(d.Cells))
	for i, c := range d.Cells {
		AddQueryConfig(&c)
		newDash.Cells[i] = c
	}
	return
}

View File

@ -1,366 +0,0 @@
package server
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestCorrectWidthHeight verifies that zero cell dimensions are bumped to
// the minimum (4) while non-zero dimensions are left alone.
func TestCorrectWidthHeight(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name string
		cell chronograf.DashboardCell
		want chronograf.DashboardCell
	}{
		{name: "updates width", cell: chronograf.DashboardCell{W: 0, H: 4}, want: chronograf.DashboardCell{W: 4, H: 4}},
		{name: "updates height", cell: chronograf.DashboardCell{W: 4, H: 0}, want: chronograf.DashboardCell{W: 4, H: 4}},
		{name: "updates both", cell: chronograf.DashboardCell{W: 0, H: 0}, want: chronograf.DashboardCell{W: 4, H: 4}},
		{name: "updates neither", cell: chronograf.DashboardCell{W: 4, H: 4}, want: chronograf.DashboardCell{W: 4, H: 4}},
	}
	for _, tc := range cases {
		// CorrectWidthHeight mutates the cell in place.
		CorrectWidthHeight(&tc.cell)
		if !reflect.DeepEqual(tc.cell, tc.want) {
			t.Errorf("%q. CorrectWidthHeight() = %v, want %v", tc.name, tc.cell, tc.want)
		}
	}
}
// TestDashboardDefaults verifies that DashboardDefaults normalizes every
// cell's width/height while leaving already-valid cells untouched.
//
// Fix: on mismatch the failure message previously printed the input tt.d
// instead of the computed result, hiding the actual value under test.
func TestDashboardDefaults(t *testing.T) {
	tests := []struct {
		name string
		d    chronograf.Dashboard
		want chronograf.Dashboard
	}{
		{
			name: "Updates all cell widths/heights",
			d: chronograf.Dashboard{
				Cells: []chronograf.DashboardCell{
					{
						W: 0,
						H: 0,
					},
					{
						W: 2,
						H: 2,
					},
				},
			},
			want: chronograf.Dashboard{
				Cells: []chronograf.DashboardCell{
					{
						W: 4,
						H: 4,
					},
					{
						W: 2,
						H: 2,
					},
				},
			},
		},
		{
			name: "Updates no cell",
			d: chronograf.Dashboard{
				Cells: []chronograf.DashboardCell{
					{
						W: 4,
						H: 4,
					}, {
						W: 2,
						H: 2,
					},
				},
			},
			want: chronograf.Dashboard{
				Cells: []chronograf.DashboardCell{
					{
						W: 4,
						H: 4,
					},
					{
						W: 2,
						H: 2,
					},
				},
			},
		},
	}
	for _, tt := range tests {
		if actual := DashboardDefaults(tt.d); !reflect.DeepEqual(actual, tt.want) {
			// Report the computed result, not the (unchanged) input.
			t.Errorf("%q. DashboardDefaults() = %v, want %v", tt.name, actual, tt.want)
		}
	}
}
// TestValidDashboardRequest verifies that ValidDashboardRequest normalizes
// cell dimensions in place while preserving an already-set organization and
// the cells' queries. Note the dashboard is mutated by the call and then
// compared against want.
func TestValidDashboardRequest(t *testing.T) {
	tests := []struct {
		name    string
		d       chronograf.Dashboard
		want    chronograf.Dashboard
		wantErr bool
	}{
		{
			name: "Updates all cell widths/heights",
			d: chronograf.Dashboard{
				Organization: "1337",
				Cells: []chronograf.DashboardCell{
					{
						W: 0,
						H: 0,
						Queries: []chronograf.DashboardQuery{
							{
								Command: "SELECT donors from hill_valley_preservation_society where time > 1985-10-25T08:00:00",
							},
						},
					},
					{
						W: 2,
						H: 2,
						Queries: []chronograf.DashboardQuery{
							{
								Command: "SELECT winning_horses from grays_sports_alamanc where time > 1955-11-1T00:00:00",
							},
						},
					},
				},
			},
			want: chronograf.Dashboard{
				Organization: "1337",
				Cells: []chronograf.DashboardCell{
					{
						W: 4,
						H: 4,
						Queries: []chronograf.DashboardQuery{
							{
								Command: "SELECT donors from hill_valley_preservation_society where time > 1985-10-25T08:00:00",
							},
						},
					},
					{
						W: 2,
						H: 2,
						Queries: []chronograf.DashboardQuery{
							{
								Command: "SELECT winning_horses from grays_sports_alamanc where time > 1955-11-1T00:00:00",
							},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		// TODO(desa): this Okay?
		// "0" stands in for the default org id; the fixture's explicit
		// organization ("1337") must win over it.
		err := ValidDashboardRequest(&tt.d, "0")
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. ValidDashboardRequest() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		if diff := cmp.Diff(tt.d, tt.want); diff != "" {
			t.Errorf("%q. ValidDashboardRequest(). got/want diff:\n%s", tt.name, diff)
		}
	}
}
// Test_newDashboardResponse verifies that newDashboardResponse builds the
// full dashboard API payload: cell defaults (W/H -> 4), per-cell self links,
// derived QueryConfig values (raw text vs. parsed measurement/fields/range),
// filled-in axes ("x"/"y"/"y2"), and the dashboard-level links.
func Test_newDashboardResponse(t *testing.T) {
	tests := []struct {
		name string
		d    chronograf.Dashboard
		want *dashboardResponse
	}{
		{
			name: "creates a dashboard response",
			d: chronograf.Dashboard{
				Organization: "0",
				Cells: []chronograf.DashboardCell{
					{
						ID: "a",
						W:  0,
						H:  0,
						Queries: []chronograf.DashboardQuery{
							{
								Source:  "/chronograf/v1/sources/1",
								Command: "SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'",
								Shifts: []chronograf.TimeShift{
									{
										Label:    "Best Week Evar",
										Unit:     "d",
										Quantity: "7",
									},
								},
							},
						},
						Axes: map[string]chronograf.Axis{
							"x": {
								Bounds: []string{"0", "100"},
							},
							"y": {
								Bounds: []string{"2", "95"},
								Label:  "foo",
							},
						},
					},
					{
						ID: "b",
						W:  0,
						H:  0,
						Queries: []chronograf.DashboardQuery{
							{
								Source:  "/chronograf/v1/sources/2",
								Command: "SELECT winning_horses from grays_sports_alamanc where time > now() - 15m",
							},
						},
					},
				},
			},
			want: &dashboardResponse{
				Organization: "0",
				Templates:    []templateResponse{},
				Cells: []dashboardCellResponse{
					{
						Links: dashboardCellLinks{
							Self: "/chronograf/v1/dashboards/0/cells/a",
						},
						DashboardCell: chronograf.DashboardCell{
							ID: "a",
							W:  4,
							H:  4,
							Queries: []chronograf.DashboardQuery{
								{
									Command: "SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'",
									Source:  "/chronograf/v1/sources/1",
									QueryConfig: chronograf.QueryConfig{
										// Unparseable query: kept verbatim as RawText.
										RawText: &[]string{"SELECT donors from hill_valley_preservation_society where time > '1985-10-25 08:00:00'"}[0],
										Fields:  []chronograf.Field{},
										GroupBy: chronograf.GroupBy{
											Tags: []string{},
										},
										Tags:            make(map[string][]string),
										AreTagsAccepted: false,
										Shifts: []chronograf.TimeShift{
											{
												Label:    "Best Week Evar",
												Unit:     "d",
												Quantity: "7",
											},
										},
									},
								},
							},
							CellColors: []chronograf.CellColor{},
							Axes: map[string]chronograf.Axis{
								"x": {
									Bounds: []string{"0", "100"},
								},
								"y": {
									Bounds: []string{"2", "95"},
									Label:  "foo",
								},
								// Missing axes are filled in with empty bounds.
								"y2": {
									Bounds: []string{"", ""},
								},
							},
						},
					},
					{
						Links: dashboardCellLinks{
							Self: "/chronograf/v1/dashboards/0/cells/b",
						},
						DashboardCell: chronograf.DashboardCell{
							ID: "b",
							W:  4,
							H:  4,
							Axes: map[string]chronograf.Axis{
								"x": {
									Bounds: []string{"", ""},
								},
								"y": {
									Bounds: []string{"", ""},
								},
								"y2": {
									Bounds: []string{"", ""},
								},
							},
							CellColors: []chronograf.CellColor{},
							Queries: []chronograf.DashboardQuery{
								{
									Command: "SELECT winning_horses from grays_sports_alamanc where time > now() - 15m",
									Source:  "/chronograf/v1/sources/2",
									QueryConfig: chronograf.QueryConfig{
										// Parseable query: decomposed into measurement/fields/range.
										Measurement: "grays_sports_alamanc",
										Fields: []chronograf.Field{
											{
												Type:  "field",
												Value: "winning_horses",
											},
										},
										GroupBy: chronograf.GroupBy{
											Tags: []string{},
										},
										Tags:            make(map[string][]string),
										AreTagsAccepted: false,
										Range: &chronograf.DurationRange{
											Lower: "now() - 15m",
										},
									},
								},
							},
						},
					},
				},
				Links: dashboardLinks{
					Self:      "/chronograf/v1/dashboards/0",
					Cells:     "/chronograf/v1/dashboards/0/cells",
					Templates: "/chronograf/v1/dashboards/0/templates",
				},
			},
		},
	}
	for _, tt := range tests {
		if got := newDashboardResponse(tt.d); !cmp.Equal(got, tt.want) {
			t.Errorf("%q. newDashboardResponse() = diff:\n%s", tt.name, cmp.Diff(got, tt.want))
		}
	}
}

View File

@ -1,519 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Query-string parameter names used for measurement pagination.
const (
	limitQuery  = "limit"
	offsetQuery = "offset"
)
// dbLinks are the URI locations related to a single database.
type dbLinks struct {
	Self         string `json:"self"`              // Self link mapping to this resource
	RPs          string `json:"retentionPolicies"` // URL for retention policies for this database
	Measurements string `json:"measurements"`      // URL for measurements for this database
}
// dbResponse is the JSON representation of a single database returned by
// the /databases endpoints.
type dbResponse struct {
	Name          string       `json:"name"`                    // a unique string identifier for the database
	Duration      string       `json:"duration,omitempty"`      // the duration (when creating a default retention policy)
	Replication   int32        `json:"replication,omitempty"`   // the replication factor (when creating a default retention policy)
	ShardDuration string       `json:"shardDuration,omitempty"` // the shard duration (when creating a default retention policy)
	RPs           []rpResponse `json:"retentionPolicies"`       // RPs are the retention policies for a database
	Links         dbLinks      `json:"links"`                   // Links are URI locations related to the database
}
// newDBResponse creates the response for the /databases endpoint, attaching
// links to the database itself, its retention policies, and its
// measurements (with default pagination parameters).
func newDBResponse(srcID int, db string, rps []rpResponse) dbResponse {
	const base = "/chronograf/v1/sources"
	self := fmt.Sprintf("%s/%d/dbs/%s", base, srcID, db)
	return dbResponse{
		Name: db,
		RPs:  rps,
		Links: dbLinks{
			Self:         self,
			RPs:          self + "/rps",
			Measurements: self + "/measurements?limit=100&offset=0",
		},
	}
}
// dbsResponse wraps the list of databases returned by GetDatabases.
type dbsResponse struct {
	Databases []dbResponse `json:"databases"`
}
// rpLinks are the URI locations related to a single retention policy.
type rpLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// rpResponse is the JSON representation of a retention policy returned by
// the /rps endpoints.
type rpResponse struct {
	Name          string  `json:"name"`          // a unique string identifier for the retention policy
	Duration      string  `json:"duration"`      // the duration
	Replication   int32   `json:"replication"`   // the replication factor
	ShardDuration string  `json:"shardDuration"` // the shard duration
	Default       bool    `json:"isDefault"`     // whether the RP should be the default
	Links         rpLinks `json:"links"`         // Links are URI locations related to the database
}
// WithLinks adds links to an rpResponse in place, pointing at this
// retention policy under its source and database.
func (r *rpResponse) WithLinks(srcID int, db string) {
	r.Links = rpLinks{
		Self: fmt.Sprintf("/chronograf/v1/sources/%d/dbs/%s/rps/%s", srcID, db, r.Name),
	}
}
// measurementLinks are the pagination links for a page of measurements.
type measurementLinks struct {
	Self  string `json:"self"`           // this page of results
	First string `json:"first"`          // the first page (offset 0)
	Next  string `json:"next,omitempty"` // the following page
	Prev  string `json:"prev,omitempty"` // the preceding page, when one exists
}
// newMeasurementLinks builds pagination links for the measurements of db in
// source src. Prev is only set when the current offset leaves room for a
// full preceding page.
func newMeasurementLinks(src int, db string, limit, offset int) measurementLinks {
	page := func(o int) string {
		return fmt.Sprintf("/chronograf/v1/sources/%d/dbs/%s/measurements?limit=%d&offset=%d", src, db, limit, o)
	}
	links := measurementLinks{
		Self:  page(offset),
		First: page(0),
		Next:  page(offset + limit),
	}
	if offset > limit {
		links.Prev = page(offset - limit)
	}
	return links
}
// measurementsResponse is one page of measurements plus pagination links.
type measurementsResponse struct {
	Measurements []chronograf.Measurement `json:"measurements"` // names of all measurements
	Links        measurementLinks         `json:"links"`        // Links are the URI locations for measurements pages
}
// GetDatabases queries the list of all databases for a source, including
// each database's retention policies. Responds 422 on a malformed source
// ID, 404 for an unknown source, and 400 when the source cannot be
// connected to or queried.
func (h *Service) GetDatabases(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	databases, err := dbsvc.AllDB(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// Attach the retention policies of every database before responding.
	dbs := make([]dbResponse, len(databases))
	for i, d := range databases {
		rps, err := h.allRPs(ctx, dbsvc, srcID, d.Name)
		if err != nil {
			Error(w, http.StatusBadRequest, err.Error(), h.Logger)
			return
		}
		dbs[i] = newDBResponse(srcID, d.Name, rps)
	}
	res := dbsResponse{
		Databases: dbs,
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// NewDatabase creates a new database within the datastore identified by the
// "id" URL parameter. The request body is a JSON chronograf.Database;
// responds 201 with the created database (and its retention policies),
// 422 on a malformed source ID or invalid body, 404 for an unknown
// source, and 400 on connection or creation failure.
func (h *Service) NewDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	postedDB := &chronograf.Database{}
	if err := json.NewDecoder(r.Body).Decode(postedDB); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := ValidDatabaseRequest(postedDB); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	database, err := dbsvc.CreateDB(ctx, postedDB)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// Echo back the created database together with its retention policies.
	rps, err := h.allRPs(ctx, dbsvc, srcID, database.Name)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	res := newDBResponse(srcID, database.Name, rps)
	encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// DropDatabase removes a database from a data source. The source is taken
// from the "id" URL parameter and the database name from the "db" route
// parameter; responds 204 on success.
func (h *Service) DropDatabase(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	source, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	databases := h.Databases
	if err = databases.Connect(ctx, &source); err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("unable to connect to source %d: %v", srcID, err), h.Logger)
		return
	}
	name := httprouter.ParamsFromContext(ctx).ByName("db")
	if err := databases.DropDB(ctx, name); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// RetentionPolicies lists retention policies within a database. The source
// comes from the "id" URL parameter and the database from the "db" route
// parameter; responds with a JSON array of rpResponse values.
func (h *Service) RetentionPolicies(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	db := httprouter.ParamsFromContext(ctx).ByName("db")
	res, err := h.allRPs(ctx, dbsvc, srcID, db)
	if err != nil {
		msg := fmt.Sprintf("unable to connect get RPs %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// allRPs fetches every retention policy of db from the connected source and
// converts each into an rpResponse with its self link attached.
func (h *Service) allRPs(ctx context.Context, dbsvc chronograf.Databases, srcID int, db string) ([]rpResponse, error) {
	policies, err := dbsvc.AllRP(ctx, db)
	if err != nil {
		return nil, err
	}
	responses := make([]rpResponse, 0, len(policies))
	for _, p := range policies {
		resp := rpResponse{
			Name:          p.Name,
			Duration:      p.Duration,
			Replication:   p.Replication,
			ShardDuration: p.ShardDuration,
			Default:       p.Default,
		}
		resp.WithLinks(srcID, db)
		responses = append(responses, resp)
	}
	return responses, nil
}
// NewRetentionPolicy creates a new retention policy for a database. The
// request body is a JSON chronograf.RetentionPolicy; responds 201 with the
// created policy, 422 on malformed input, 404 for an unknown source, and
// 400 on connection or creation failure.
func (h *Service) NewRetentionPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	postedRP := &chronograf.RetentionPolicy{}
	if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := ValidRetentionPolicyRequest(postedRP); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	db := httprouter.ParamsFromContext(ctx).ByName("db")
	rp, err := dbsvc.CreateRP(ctx, db, postedRP)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	res := rpResponse{
		Name:          rp.Name,
		Duration:      rp.Duration,
		Replication:   rp.Replication,
		ShardDuration: rp.ShardDuration,
		Default:       rp.Default,
	}
	res.WithLinks(srcID, db)
	encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// UpdateRetentionPolicy modifies an existing retention policy for a
// database. The target policy is named by the "rp" route parameter and the
// new values come from a JSON chronograf.RetentionPolicy body; responds
// 201 with the updated policy.
func (h *Service) UpdateRetentionPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	postedRP := &chronograf.RetentionPolicy{}
	if err := json.NewDecoder(r.Body).Decode(postedRP); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := ValidRetentionPolicyRequest(postedRP); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	params := httprouter.ParamsFromContext(ctx)
	db := params.ByName("db")
	rp := params.ByName("rp")
	p, err := dbsvc.UpdateRP(ctx, db, rp, postedRP)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	res := rpResponse{
		Name:          p.Name,
		Duration:      p.Duration,
		Replication:   p.Replication,
		ShardDuration: p.ShardDuration,
		Default:       p.Default,
	}
	res.WithLinks(srcID, db)
	encodeJSON(w, http.StatusCreated, res, h.Logger)
}
// DropRetentionPolicy removes a retention policy from a database. The
// policy is named by the "rp" route parameter within database "db";
// responds 204 on success.
//
// Receiver renamed from s to h for consistency with the other *Service
// handlers in this file.
func (h *Service) DropRetentionPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	params := httprouter.ParamsFromContext(ctx)
	db := params.ByName("db")
	rp := params.ByName("rp")
	if err := dbsvc.DropRP(ctx, db, rp); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// Measurements lists measurements within a database, paginated by the
// "limit" and "offset" query parameters (validated and defaulted by
// validMeasurementQuery). Responds with one page of measurements plus
// pagination links.
func (h *Service) Measurements(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	// Validate pagination before touching the source at all.
	limit, offset, err := validMeasurementQuery(r.URL.Query())
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	dbsvc := h.Databases
	if err = dbsvc.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	db := httprouter.ParamsFromContext(ctx).ByName("db")
	measurements, err := dbsvc.GetMeasurements(ctx, db, limit, offset)
	if err != nil {
		msg := fmt.Sprintf("Unable to get measurements %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	res := measurementsResponse{
		Measurements: measurements,
		Links:        newMeasurementLinks(srcID, db, limit, offset),
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// validMeasurementQuery parses and sanitizes the "limit" and "offset"
// pagination parameters. A missing or non-positive limit defaults to 100; a
// missing or negative offset defaults to 0. A non-integer value for either
// parameter is returned as an error (with zeroed results). Explicit returns
// replace the original naked returns, which leaked partially-populated
// named results on the error paths.
func validMeasurementQuery(query url.Values) (limit, offset int, err error) {
	limit = 100
	if p := query.Get(limitQuery); p != "" {
		limit, err = strconv.Atoi(p)
		if err != nil {
			return 0, 0, err
		}
		if limit <= 0 {
			limit = 100
		}
	}
	if p := query.Get(offsetQuery); p != "" {
		offset, err = strconv.Atoi(p)
		if err != nil {
			return 0, 0, err
		}
		if offset < 0 {
			offset = 0
		}
	}
	return limit, offset, nil
}
// ValidDatabaseRequest checks if the database posted is valid: the only
// requirement is a non-empty name.
func ValidDatabaseRequest(d *chronograf.Database) error {
	if d.Name == "" {
		return fmt.Errorf("name is required")
	}
	return nil
}
// ValidRetentionPolicyRequest checks if a retention policy is valid on
// POST: it must have a name, a duration, and a non-zero replication factor.
func ValidRetentionPolicyRequest(rp *chronograf.RetentionPolicy) error {
	switch {
	case rp.Name == "":
		return fmt.Errorf("name is required")
	case rp.Duration == "":
		return fmt.Errorf("duration is required")
	case rp.Replication == 0:
		return fmt.Errorf("replication factor is invalid")
	}
	return nil
}

View File

@ -1,648 +0,0 @@
package server
import (
"context"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestService_GetDatabases is a scaffolded table-driven test for
// Service.GetDatabases; the case table is empty (TODO), so it currently
// exercises nothing.
func TestService_GetDatabases(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.GetDatabases(tt.args.w, tt.args.r)
		})
	}
}
// TestService_NewDatabase is a scaffolded table-driven test for
// Service.NewDatabase; the case table is empty (TODO).
func TestService_NewDatabase(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.NewDatabase(tt.args.w, tt.args.r)
		})
	}
}
// TestService_DropDatabase is a scaffolded table-driven test for
// Service.DropDatabase; the case table is empty (TODO).
func TestService_DropDatabase(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.DropDatabase(tt.args.w, tt.args.r)
		})
	}
}
// TestService_RetentionPolicies is a scaffolded table-driven test for
// Service.RetentionPolicies; the case table is empty (TODO).
func TestService_RetentionPolicies(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.RetentionPolicies(tt.args.w, tt.args.r)
		})
	}
}
// TestService_NewRetentionPolicy is a scaffolded table-driven test for
// Service.NewRetentionPolicy; the case table is empty (TODO).
func TestService_NewRetentionPolicy(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.NewRetentionPolicy(tt.args.w, tt.args.r)
		})
	}
}
// TestService_UpdateRetentionPolicy is a scaffolded table-driven test for
// Service.UpdateRetentionPolicy; the case table is empty (TODO).
func TestService_UpdateRetentionPolicy(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.UpdateRetentionPolicy(tt.args.w, tt.args.r)
		})
	}
}
// TestService_DropRetentionPolicy is a scaffolded table-driven test for
// Service.DropRetentionPolicy; the case table is empty (TODO).
func TestService_DropRetentionPolicy(t *testing.T) {
	type fields struct {
		SourcesStore     chronograf.SourcesStore
		ServersStore     chronograf.ServersStore
		LayoutsStore     chronograf.LayoutsStore
		UsersStore       chronograf.UsersStore
		DashboardsStore  chronograf.DashboardsStore
		TimeSeriesClient TimeSeriesClient
		Logger           chronograf.Logger
		UseAuth          bool
		Databases        chronograf.Databases
	}
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &Service{
				Store: &Store{
					SourcesStore:    tt.fields.SourcesStore,
					ServersStore:    tt.fields.ServersStore,
					LayoutsStore:    tt.fields.LayoutsStore,
					UsersStore:      tt.fields.UsersStore,
					DashboardsStore: tt.fields.DashboardsStore,
				},
				TimeSeriesClient: tt.fields.TimeSeriesClient,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
				Databases:        tt.fields.Databases,
			}
			h.DropRetentionPolicy(tt.args.w, tt.args.r)
		})
	}
}
// TestService_Measurements exercises Service.Measurements end-to-end with
// mocked stores: pagination defaulting (limit<=0 -> 100, offset<0 -> 0),
// rejection of non-integer limit/offset with 422, the prev link when
// offset exceeds limit, and the exact golden JSON bodies.
func TestService_Measurements(t *testing.T) {
	type fields struct {
		SourcesStore chronograf.SourcesStore
		Logger       chronograf.Logger
		Databases    chronograf.Databases
	}
	type args struct {
		queryParams map[string]string
	}
	type wants struct {
		statusCode int
		body       string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "Gets 100 measurements when no limit or offset provided",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
				Databases: &mocks.Databases{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
						return []chronograf.Measurement{
							{
								Name: "pineapple",
							},
							{
								Name: "cubeapple",
							},
							{
								Name: "pinecube",
							},
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{},
			},
			wants: wants{
				statusCode: 200,
				body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}}
`,
			},
		},
		{
			name: "Fails when invalid limit value provided",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{
					"limit": "joe",
				},
			},
			wants: wants{
				statusCode: 422,
				body:       `{"code":422,"message":"strconv.Atoi: parsing \"joe\": invalid syntax"}`,
			},
		},
		{
			name: "Fails when invalid offset value provided",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{
					"offset": "bob",
				},
			},
			wants: wants{
				statusCode: 422,
				body:       `{"code":422,"message":"strconv.Atoi: parsing \"bob\": invalid syntax"}`,
			},
		},
		{
			name: "Overrides limit less than or equal to 0 with limit 100",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
				Databases: &mocks.Databases{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
						return []chronograf.Measurement{
							{
								Name: "pineapple",
							},
							{
								Name: "cubeapple",
							},
							{
								Name: "pinecube",
							},
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{
					"limit": "0",
				},
			},
			wants: wants{
				statusCode: 200,
				body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}}
`,
			},
		},
		{
			name: "Overrides offset less than 0 with offset 0",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
				Databases: &mocks.Databases{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
						return []chronograf.Measurement{
							{
								Name: "pineapple",
							},
							{
								Name: "cubeapple",
							},
							{
								Name: "pinecube",
							},
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{
					"offset": "-1337",
				},
			},
			wants: wants{
				statusCode: 200,
				body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=100\u0026offset=100"}}
`,
			},
		},
		{
			name: "Provides a prev link when offset exceeds limit",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, srcID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID: 0,
						}, nil
					},
				},
				Databases: &mocks.Databases{
					ConnectF: func(context.Context, *chronograf.Source) error {
						return nil
					},
					GetMeasurementsF: func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
						return []chronograf.Measurement{
							{
								Name: "pineapple",
							},
							{
								Name: "cubeapple",
							},
							{
								Name: "pinecube",
							},
							{
								Name: "billietta",
							},
							{
								Name: "bobbetta",
							},
							{
								Name: "bobcube",
							},
						}, nil
					},
				},
			},
			args: args{
				queryParams: map[string]string{
					"limit":  "2",
					"offset": "4",
				},
			},
			wants: wants{
				statusCode: 200,
				body: `{"measurements":[{"name":"pineapple"},{"name":"cubeapple"},{"name":"pinecube"},{"name":"billietta"},{"name":"bobbetta"},{"name":"bobcube"}],"links":{"self":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=4","first":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=0","next":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=6","prev":"/chronograf/v1/sources/0/dbs/pineapples/measurements?limit=2\u0026offset=2"}}
`,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logger := &chronograf.NoopLogger{}
			h := &Service{
				Store: &mocks.Store{
					SourcesStore: tt.fields.SourcesStore,
				},
				Logger:    logger,
				Databases: tt.fields.Databases,
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest(
				"GET",
				"http://any.url",
				nil,
			)
			// Inject the route parameters the handler reads from the context.
			r = r.WithContext(context.WithValue(
				context.TODO(),
				httprouter.ParamsKey,
				httprouter.Params{
					{
						Key:   "id",
						Value: "0",
					},
					{
						Key:   "db",
						Value: "pineapples",
					},
				}))
			q := r.URL.Query()
			for key, value := range tt.args.queryParams {
				q.Add(key, value)
			}
			r.URL.RawQuery = q.Encode()
			h.Measurements(w, r)
			resp := w.Result()
			body, err := ioutil.ReadAll(resp.Body)
			defer resp.Body.Close()
			if err != nil {
				t.Error("TestService_Measurements not able to retrieve body")
			}
			var msmts measurementsResponse
			if err := json.Unmarshal(body, &msmts); err != nil {
				t.Error("TestService_Measurements not able to unmarshal JSON response")
			}
			if tt.wants.statusCode != resp.StatusCode {
				t.Errorf("%q. StatusCode:\nwant\n%v\ngot\n%v", tt.name, tt.wants.statusCode, resp.StatusCode)
			}
			if tt.wants.body != string(body) {
				t.Errorf("%q. Body:\nwant\n*%s*\ngot\n*%s*", tt.name, tt.wants.body, string(body))
			}
		})
	}
}
// TestValidDatabaseRequest is a scaffolded table-driven test for
// ValidDatabaseRequest; the case table is empty (TODO).
func TestValidDatabaseRequest(t *testing.T) {
	type args struct {
		d *chronograf.Database
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ValidDatabaseRequest(tt.args.d); (err != nil) != tt.wantErr {
				t.Errorf("ValidDatabaseRequest() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestValidRetentionPolicyRequest is a scaffolded table-driven test for
// ValidRetentionPolicyRequest; the case table is empty (TODO).
func TestValidRetentionPolicyRequest(t *testing.T) {
	type args struct {
		rp *chronograf.RetentionPolicy
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ValidRetentionPolicyRequest(tt.args.rp); (err != nil) != tt.wantErr {
				t.Errorf("ValidRetentionPolicyRequest() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@ -1,27 +0,0 @@
package server
import (
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
)
// envResponse is the JSON representation of the global application
// environment served at /chronograf/v1/env.
type envResponse struct {
	Links                  selfLinks `json:"links"`
	TelegrafSystemInterval string    `json:"telegrafSystemInterval"`
}
// newEnvResponse converts the application environment into its API
// representation, rendering the telegraf interval as a duration string.
func newEnvResponse(env chronograf.Environment) *envResponse {
	resp := &envResponse{
		TelegrafSystemInterval: env.TelegrafSystemInterval.String(),
	}
	resp.Links = selfLinks{Self: "/chronograf/v1/env"}
	return resp
}
// Environment retrieves the global application configuration and writes it
// as JSON with a 200 status.
func (s *Service) Environment(w http.ResponseWriter, r *http.Request) {
	res := newEnvResponse(s.Env)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}

View File

@ -1,69 +0,0 @@
package server
import (
"io/ioutil"
"net/http/httptest"
"testing"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestEnvironment checks that Service.Environment renders the configured
// environment as the expected JSON payload (status, content type, and
// body compared via jsonEqual).
func TestEnvironment(t *testing.T) {
	type fields struct {
		Environment chronograf.Environment
	}
	type wants struct {
		statusCode  int
		contentType string
		body        string
	}
	tests := []struct {
		name   string
		fields fields
		wants  wants
	}{
		{
			name: "Get environment",
			fields: fields{
				Environment: chronograf.Environment{
					TelegrafSystemInterval: 1 * time.Minute,
				},
			},
			wants: wants{
				statusCode:  200,
				contentType: "application/json",
				body:        `{"links":{"self":"/chronograf/v1/env"},"telegrafSystemInterval":"1m0s"}`,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Env:    tt.fields.Environment,
				Logger: &chronograf.NoopLogger{},
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest("GET", "http://any.url", nil)
			s.Environment(w, r)
			resp := w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wants.statusCode {
				t.Errorf("%q. Config() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode)
			}
			if tt.wants.contentType != "" && content != tt.wants.contentType {
				t.Errorf("%q. Config() = %v, want %v", tt.name, content, tt.wants.contentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
				t.Errorf("%q. Config() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body)
			}
		})
	}
}

View File

@ -1,7 +0,0 @@
package server
import "net/http"
func location(w http.ResponseWriter, self string) {
w.Header().Add("Location", self)
}

View File

@ -1,12 +0,0 @@
package server
import "net/http"
// HSTS add HTTP Strict Transport Security header with a max-age of two years
// Inspired from https://blog.bracebin.com/achieving-perfect-ssl-labs-score-with-go
func HSTS(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains")
next.ServeHTTP(w, r)
})
}

View File

@ -1,142 +0,0 @@
package server
import (
"crypto/tls"
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// ValidInfluxRequest checks if queries specify a command; an empty command
// is rejected.
func ValidInfluxRequest(p chronograf.Query) error {
	if p.Command != "" {
		return nil
	}
	return fmt.Errorf("query field required")
}
// postInfluxResponse wraps the raw time-series query result so it encodes
// under a top-level "results" key in the proxied response body.
type postInfluxResponse struct {
	Results interface{} `json:"results"` // results from influx
}
// Influx proxies requests to influxdb: it resolves the source from the "id"
// route parameter, decodes and validates the query from the request body,
// runs it through the source's time-series client, and writes the results
// back as JSON.
func (s *Service) Influx(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	var query chronograf.Query
	if err = json.NewDecoder(r.Body).Decode(&query); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err = ValidInfluxRequest(query); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("unable to connect to source %d: %v", id, err), s.Logger)
		return
	}
	if err = ts.Connect(ctx, &src); err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("unable to connect to source %d: %v", id, err), s.Logger)
		return
	}

	response, err := ts.Query(ctx, query)
	if err != nil {
		if err == chronograf.ErrUpstreamTimeout {
			Error(w, http.StatusRequestTimeout, "Timeout waiting for Influx response", s.Logger)
			return
		}
		// TODO: Here I want to return the error code from influx.
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, postInfluxResponse{Results: response}, s.Logger)
}
// Write reverse-proxies a write request to the source's /write endpoint,
// carrying the original query string and the source's auth credentials.
func (s *Service) Write(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, id)
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}

	target, err := url.Parse(src.URL)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, fmt.Sprintf("Error parsing source url: %v", err), s.Logger)
		return
	}
	target.Path = "/write"
	target.RawQuery = r.URL.RawQuery

	proxy := &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			// Set the Host header of the original source URL
			req.Host = target.Host
			req.URL = target
			// Because we are acting as a proxy, influxdb needs to have the
			// basic auth or bearer token information set as a header directly
			influx.DefaultAuthorization(&src).Set(req)
		},
	}

	// The connection to influxdb may use a self-signed certificate. This
	// transport mirrors http.DefaultTransport but sets InsecureSkipVerify.
	if src.InsecureSkipVerify {
		proxy.Transport = &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).DialContext,
			MaxIdleConns:          100,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			TLSClientConfig:       &tls.Config{InsecureSkipVerify: true},
		}
	}

	proxy.ServeHTTP(w, r)
}

View File

@ -1,116 +0,0 @@
package server
import (
"bytes"
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestService_Influx verifies that Service.Influx resolves the source via the
// "id" route parameter, forwards the decoded query to the time-series client,
// and writes the backend response wrapped under a top-level "results" key.
func TestService_Influx(t *testing.T) {
	type fields struct {
		SourcesStore chronograf.SourcesStore
		TimeSeries   TimeSeriesClient
	}
	type args struct {
		w *httptest.ResponseRecorder
		r *http.Request
	}
	type want struct {
		StatusCode  int
		ContentType string
		Body        string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		ID     string // value injected as the httprouter "id" parameter
		want   want
	}{
		{
			name: "Proxies request to Influxdb",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID:  1337,
							URL: "http://any.url",
						}, nil
					},
				},
				TimeSeries: &mocks.TimeSeries{
					ConnectF: func(ctx context.Context, src *chronograf.Source) error {
						return nil
					},
					QueryF: func(ctx context.Context, query chronograf.Query) (chronograf.Response, error) {
						// Canned Influx response; the handler is expected to
						// nest it under {"results": ...} in the output body.
						return mocks.NewResponse(
								`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["cpu","cpu-total"],["cpu","cpu0"],["cpu","cpu1"],["cpu","cpu2"],["cpu","cpu3"],["host","pineapples-MBP"],["host","pineapples-MacBook-Pro.local"]]}]}]}`,
								nil,
							),
							nil
					},
				},
			},
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"POST",
					"http://any.url",
					ioutil.NopCloser(
						bytes.NewReader([]byte(
							`{"db":"bob", "rp":"joe", "query":"SELECT mean(\"usage_user\") FROM cpu WHERE \"cpu\" = 'cpu-total' AND time > now() - 10m GROUP BY host;"}`,
						)),
					),
				),
			},
			ID: "1",
			want: want{
				StatusCode:  http.StatusOK,
				ContentType: "application/json",
				// NOTE: trailing newline inside the raw string is intentional;
				// the handler's JSON encoder terminates the body with one.
				Body: `{"results":{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["cpu","cpu-total"],["cpu","cpu0"],["cpu","cpu1"],["cpu","cpu2"],["cpu","cpu3"],["host","pineapples-MBP"],["host","pineapples-MacBook-Pro.local"]]}]}]}}
`,
			},
		},
	}
	for _, tt := range tests {
		// Install the "id" route parameter the handler reads via paramID.
		tt.args.r = tt.args.r.WithContext(context.WithValue(
			context.TODO(),
			httprouter.ParamsKey,
			httprouter.Params{
				{
					Key:   "id",
					Value: tt.ID,
				},
			}))
		h := &Service{
			Store: &mocks.Store{
				SourcesStore: tt.fields.SourcesStore,
			},
			TimeSeriesClient: tt.fields.TimeSeries,
		}
		h.Influx(tt.args.w, tt.args.r)
		resp := tt.args.w.Result()
		contentType := resp.Header.Get("Content-Type")
		body, _ := ioutil.ReadAll(resp.Body)
		if resp.StatusCode != tt.want.StatusCode {
			t.Errorf("%q. Influx() = got %v, want %v", tt.name, resp.StatusCode, tt.want.StatusCode)
		}
		if contentType != tt.want.ContentType {
			t.Errorf("%q. Influx() = got %v, want %v", tt.name, contentType, tt.want.ContentType)
		}
		if string(body) != tt.want.Body {
			t.Errorf("%q. Influx() =\ngot ***%v***\nwant ***%v***\n", tt.name, string(body), tt.want.Body)
		}
	}
}

View File

@ -1,791 +0,0 @@
package server
// TODO(desa): resolve kapacitor dependency
//type postKapacitorRequest struct {
// Name *string `json:"name"` // User facing name of kapacitor instance.; Required: true
// URL *string `json:"url"` // URL for the kapacitor backend (e.g. http://localhost:9092);/ Required: true
// Username string `json:"username,omitempty"` // Username for authentication to kapacitor
// Password string `json:"password,omitempty"`
// InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted.
// Active bool `json:"active"`
// Organization string `json:"organization"` // Organization is the organization ID that resource belongs to
//}
//
//func (p *postKapacitorRequest) Valid(defaultOrgID string) error {
// if p.Name == nil || p.URL == nil {
// return fmt.Errorf("name and url required")
// }
//
// if p.Organization == "" {
// p.Organization = defaultOrgID
// }
//
// url, err := url.ParseRequestURI(*p.URL)
// if err != nil {
// return fmt.Errorf("invalid source URI: %v", err)
// }
// if len(url.Scheme) == 0 {
// return fmt.Errorf("invalid URL; no URL scheme defined")
// }
//
// return nil
//}
//
//type kapaLinks struct {
// Proxy string `json:"proxy"` // URL location of proxy endpoint for this source
// Self string `json:"self"` // Self link mapping to this resource
// Rules string `json:"rules"` // Rules link for defining roles alerts for kapacitor
// Tasks string `json:"tasks"` // Tasks link to define a task against the proxy
// Ping string `json:"ping"` // Ping path to kapacitor
//}
//
//type kapacitor struct {
// ID int `json:"id,string"` // Unique identifier representing a kapacitor instance.
// Name string `json:"name"` // User facing name of kapacitor instance.
// URL string `json:"url"` // URL for the kapacitor backend (e.g. http://localhost:9092)
// Username string `json:"username,omitempty"` // Username for authentication to kapacitor
// Password string `json:"password,omitempty"`
// InsecureSkipVerify bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted.
// Active bool `json:"active"`
// Links kapaLinks `json:"links"` // Links are URI locations related to kapacitor
//}
//
//// NewKapacitor adds a valid kapacitor to the store.
//func (s *Service) NewKapacitor(w http.ResponseWriter, r *http.Request) {
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// _, err = s.Store.Sources(ctx).Get(ctx, srcID)
// if err != nil {
// notFound(w, srcID, s.Logger)
// return
// }
//
// var req postKapacitorRequest
// if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
// invalidJSON(w, s.Logger)
// return
// }
//
// defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)
// if err != nil {
// unknownErrorWithMessage(w, err, s.Logger)
// return
// }
//
// if err := req.Valid(defaultOrg.ID); err != nil {
// invalidData(w, err, s.Logger)
// return
// }
//
// srv := chronograf.Server{
// SrcID: srcID,
// Name: *req.Name,
// Username: req.Username,
// Password: req.Password,
// InsecureSkipVerify: req.InsecureSkipVerify,
// URL: *req.URL,
// Active: req.Active,
// Organization: req.Organization,
// }
//
// if srv, err = s.Store.Servers(ctx).Add(ctx, srv); err != nil {
// msg := fmt.Errorf("error storing kapacitor %v: %v", req, err)
// unknownErrorWithMessage(w, msg, s.Logger)
// return
// }
//
// res := newKapacitor(srv)
// location(w, res.Links.Self)
// encodeJSON(w, http.StatusCreated, res, s.Logger)
//}
//
//func newKapacitor(srv chronograf.Server) kapacitor {
// httpAPISrcs := "/chronograf/v1/sources"
// return kapacitor{
// ID: srv.ID,
// Name: srv.Name,
// Username: srv.Username,
// URL: srv.URL,
// Active: srv.Active,
// InsecureSkipVerify: srv.InsecureSkipVerify,
// Links: kapaLinks{
// Self: fmt.Sprintf("%s/%d/kapacitors/%d", httpAPISrcs, srv.SrcID, srv.ID),
// Proxy: fmt.Sprintf("%s/%d/kapacitors/%d/proxy", httpAPISrcs, srv.SrcID, srv.ID),
// Rules: fmt.Sprintf("%s/%d/kapacitors/%d/rules", httpAPISrcs, srv.SrcID, srv.ID),
// Tasks: fmt.Sprintf("%s/%d/kapacitors/%d/proxy?path=/kapacitor/v1/tasks", httpAPISrcs, srv.SrcID, srv.ID),
// Ping: fmt.Sprintf("%s/%d/kapacitors/%d/proxy?path=/kapacitor/v1/ping", httpAPISrcs, srv.SrcID, srv.ID),
// },
// }
//}
//
//type kapacitors struct {
// Kapacitors []kapacitor `json:"kapacitors"`
//}
//
//// Kapacitors retrieves all kapacitors from store.
//func (s *Service) Kapacitors(w http.ResponseWriter, r *http.Request) {
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// mrSrvs, err := s.Store.Servers(ctx).All(ctx)
// if err != nil {
// Error(w, http.StatusInternalServerError, "Error loading kapacitors", s.Logger)
// return
// }
//
// srvs := []kapacitor{}
// for _, srv := range mrSrvs {
// if srv.SrcID == srcID && srv.Type == "" {
// srvs = append(srvs, newKapacitor(srv))
// }
// }
//
// res := kapacitors{
// Kapacitors: srvs,
// }
//
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// KapacitorsID retrieves a kapacitor with ID from store.
//func (s *Service) KapacitorsID(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID || srv.Type != "" {
// notFound(w, id, s.Logger)
// return
// }
//
// res := newKapacitor(srv)
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// RemoveKapacitor deletes kapacitor from store.
//func (s *Service) RemoveKapacitor(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID || srv.Type != "" {
// notFound(w, id, s.Logger)
// return
// }
//
// if err = s.Store.Servers(ctx).Delete(ctx, srv); err != nil {
// unknownErrorWithMessage(w, err, s.Logger)
// return
// }
//
// w.WriteHeader(http.StatusNoContent)
//}
//
//type patchKapacitorRequest struct {
// Name *string `json:"name,omitempty"` // User facing name of kapacitor instance.
// URL *string `json:"url,omitempty"` // URL for the kapacitor
// Username *string `json:"username,omitempty"` // Username for kapacitor auth
// Password *string `json:"password,omitempty"`
// InsecureSkipVerify *bool `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the kapacitor is accepted.
// Active *bool `json:"active"`
//}
//
//func (p *patchKapacitorRequest) Valid() error {
// if p.URL != nil {
// url, err := url.ParseRequestURI(*p.URL)
// if err != nil {
// return fmt.Errorf("invalid source URI: %v", err)
// }
// if len(url.Scheme) == 0 {
// return fmt.Errorf("invalid URL; no URL scheme defined")
// }
// }
// return nil
//}
//
//// UpdateKapacitor incrementally updates a kapacitor definition in the store
//func (s *Service) UpdateKapacitor(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID || srv.Type != "" {
// notFound(w, id, s.Logger)
// return
// }
//
// var req patchKapacitorRequest
// if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
// invalidJSON(w, s.Logger)
// return
// }
//
// if err := req.Valid(); err != nil {
// invalidData(w, err, s.Logger)
// return
// }
//
// if req.Name != nil {
// srv.Name = *req.Name
// }
// if req.URL != nil {
// srv.URL = *req.URL
// }
// if req.Password != nil {
// srv.Password = *req.Password
// }
// if req.Username != nil {
// srv.Username = *req.Username
// }
// if req.InsecureSkipVerify != nil {
// srv.InsecureSkipVerify = *req.InsecureSkipVerify
// }
// if req.Active != nil {
// srv.Active = *req.Active
// }
//
// if err := s.Store.Servers(ctx).Update(ctx, srv); err != nil {
// msg := fmt.Sprintf("Error updating kapacitor ID %d", id)
// Error(w, http.StatusInternalServerError, msg, s.Logger)
// return
// }
//
// res := newKapacitor(srv)
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// KapacitorRulesPost proxies POST to kapacitor
//func (s *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
//
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
//
// var req chronograf.AlertRule
// if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
// invalidData(w, err, s.Logger)
// return
// }
// // TODO: validate this data
// /*
// if err := req.Valid(); err != nil {
// invalidData(w, err)
// return
// }
// */
//
// if req.Name == "" {
// req.Name = req.ID
// }
//
// req.ID = ""
// task, err := c.Create(ctx, req)
// if err != nil {
// invalidData(w, err, s.Logger)
// return
// }
// res := newAlertResponse(task, srv.SrcID, srv.ID)
// location(w, res.Links.Self)
// encodeJSON(w, http.StatusCreated, res, s.Logger)
//}
//
//type alertLinks struct {
// Self string `json:"self"`
// Kapacitor string `json:"kapacitor"`
// Output string `json:"output"`
//}
//
//type alertResponse struct {
// chronograf.AlertRule
// Links alertLinks `json:"links"`
//}
//
//// newAlertResponse formats task into an alertResponse
//func newAlertResponse(task *kapa.Task, srcID, kapaID int) *alertResponse {
// res := &alertResponse{
// AlertRule: task.Rule,
// Links: alertLinks{
// Self: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/rules/%s", srcID, kapaID, task.ID),
// Kapacitor: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(task.Href)),
// Output: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(task.HrefOutput)),
// },
// }
//
// if res.AlertNodes.Alerta == nil {
// res.AlertNodes.Alerta = []*chronograf.Alerta{}
// }
//
// for i, a := range res.AlertNodes.Alerta {
// if a.Service == nil {
// a.Service = []string{}
// res.AlertNodes.Alerta[i] = a
// }
// }
//
// if res.AlertNodes.Email == nil {
// res.AlertNodes.Email = []*chronograf.Email{}
// }
//
// for i, a := range res.AlertNodes.Email {
// if a.To == nil {
// a.To = []string{}
// res.AlertNodes.Email[i] = a
// }
// }
//
// if res.AlertNodes.Exec == nil {
// res.AlertNodes.Exec = []*chronograf.Exec{}
// }
//
// for i, a := range res.AlertNodes.Exec {
// if a.Command == nil {
// a.Command = []string{}
// res.AlertNodes.Exec[i] = a
// }
// }
//
// if res.AlertNodes.HipChat == nil {
// res.AlertNodes.HipChat = []*chronograf.HipChat{}
// }
//
// if res.AlertNodes.Kafka == nil {
// res.AlertNodes.Kafka = []*chronograf.Kafka{}
// }
//
// if res.AlertNodes.Log == nil {
// res.AlertNodes.Log = []*chronograf.Log{}
// }
//
// if res.AlertNodes.OpsGenie == nil {
// res.AlertNodes.OpsGenie = []*chronograf.OpsGenie{}
// }
//
// for i, a := range res.AlertNodes.OpsGenie {
// if a.Teams == nil {
// a.Teams = []string{}
// res.AlertNodes.OpsGenie[i] = a
// }
//
// if a.Recipients == nil {
// a.Recipients = []string{}
// res.AlertNodes.OpsGenie[i] = a
// }
// }
//
// if res.AlertNodes.OpsGenie2 == nil {
// res.AlertNodes.OpsGenie2 = []*chronograf.OpsGenie{}
// }
//
// for i, a := range res.AlertNodes.OpsGenie2 {
// if a.Teams == nil {
// a.Teams = []string{}
// res.AlertNodes.OpsGenie2[i] = a
// }
//
// if a.Recipients == nil {
// a.Recipients = []string{}
// res.AlertNodes.OpsGenie2[i] = a
// }
// }
//
// if res.AlertNodes.PagerDuty == nil {
// res.AlertNodes.PagerDuty = []*chronograf.PagerDuty{}
// }
//
// if res.AlertNodes.PagerDuty2 == nil {
// res.AlertNodes.PagerDuty2 = []*chronograf.PagerDuty{}
// }
//
// if res.AlertNodes.Posts == nil {
// res.AlertNodes.Posts = []*chronograf.Post{}
// }
//
// for i, a := range res.AlertNodes.Posts {
// if a.Headers == nil {
// a.Headers = map[string]string{}
// res.AlertNodes.Posts[i] = a
// }
// }
//
// if res.AlertNodes.Pushover == nil {
// res.AlertNodes.Pushover = []*chronograf.Pushover{}
// }
//
// if res.AlertNodes.Sensu == nil {
// res.AlertNodes.Sensu = []*chronograf.Sensu{}
// }
//
// for i, a := range res.AlertNodes.Sensu {
// if a.Handlers == nil {
// a.Handlers = []string{}
// res.AlertNodes.Sensu[i] = a
// }
// }
//
// if res.AlertNodes.Slack == nil {
// res.AlertNodes.Slack = []*chronograf.Slack{}
// }
//
// if res.AlertNodes.Talk == nil {
// res.AlertNodes.Talk = []*chronograf.Talk{}
// }
//
// if res.AlertNodes.TCPs == nil {
// res.AlertNodes.TCPs = []*chronograf.TCP{}
// }
//
// if res.AlertNodes.Telegram == nil {
// res.AlertNodes.Telegram = []*chronograf.Telegram{}
// }
//
// if res.AlertNodes.VictorOps == nil {
// res.AlertNodes.VictorOps = []*chronograf.VictorOps{}
// }
//
// if res.Query != nil {
// if res.Query.ID == "" {
// res.Query.ID = res.ID
// }
//
// if res.Query.Fields == nil {
// res.Query.Fields = make([]chronograf.Field, 0)
// }
//
// if res.Query.GroupBy.Tags == nil {
// res.Query.GroupBy.Tags = make([]string, 0)
// }
//
// if res.Query.Tags == nil {
// res.Query.Tags = make(map[string][]string)
// }
// }
// return res
//}
//
//// ValidRuleRequest checks if the requested rule change is valid
//func ValidRuleRequest(rule chronograf.AlertRule) error {
// if rule.Query == nil {
// return fmt.Errorf("invalid alert rule: no query defined")
// }
// var hasFuncs bool
// for _, f := range rule.Query.Fields {
// if f.Type == "func" && len(f.Args) > 0 {
// hasFuncs = true
// }
// }
// // All kapacitor rules with functions must have a window that is applied
// // every amount of time
// if rule.Every == "" && hasFuncs {
// return fmt.Errorf(`invalid alert rule: functions require an "every" window`)
// }
// return nil
//}
//
//// KapacitorRulesPut proxies PATCH to kapacitor
//func (s *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
//
// tid := httprouter.GetParamFromContext(ctx, "tid")
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
// var req chronograf.AlertRule
// if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
// invalidData(w, err, s.Logger)
// return
// }
// // TODO: validate this data
// /*
// if err := req.Valid(); err != nil {
// invalidData(w, err)
// return
// }
// */
//
// // Check if the rule exists and is scoped correctly
// if _, err = c.Get(ctx, tid); err != nil {
// if err == chronograf.ErrAlertNotFound {
// notFound(w, id, s.Logger)
// return
// }
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// // Replace alert completely with this new alert.
// req.ID = tid
// task, err := c.Update(ctx, c.Href(tid), req)
// if err != nil {
// invalidData(w, err, s.Logger)
// return
// }
// res := newAlertResponse(task, srv.SrcID, srv.ID)
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// KapacitorStatus is the current state of a running task
//type KapacitorStatus struct {
// Status string `json:"status"`
//}
//
//// Valid check if the kapacitor status is enabled or disabled
//func (k *KapacitorStatus) Valid() error {
// if k.Status == "enabled" || k.Status == "disabled" {
// return nil
// }
// return fmt.Errorf("invalid Kapacitor status: %s", k.Status)
//}
//
//// KapacitorRulesStatus proxies PATCH to kapacitor to enable/disable tasks
//func (s *Service) KapacitorRulesStatus(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
//
// tid := httprouter.GetParamFromContext(ctx, "tid")
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
//
// var req KapacitorStatus
// if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
// invalidJSON(w, s.Logger)
// return
// }
// if err := req.Valid(); err != nil {
// invalidData(w, err, s.Logger)
// return
// }
//
// // Check if the rule exists and is scoped correctly
// _, err = c.Get(ctx, tid)
// if err != nil {
// if err == chronograf.ErrAlertNotFound {
// notFound(w, id, s.Logger)
// return
// }
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// var task *kapa.Task
// if req.Status == "enabled" {
// task, err = c.Enable(ctx, c.Href(tid))
// } else {
// task, err = c.Disable(ctx, c.Href(tid))
// }
//
// if err != nil {
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// res := newAlertResponse(task, srv.SrcID, srv.ID)
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// KapacitorRulesGet retrieves all rules
//func (s *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
//
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
// tasks, err := c.All(ctx)
// if err != nil {
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// res := allAlertsResponse{
// Rules: []*alertResponse{},
// }
// for _, task := range tasks {
// ar := newAlertResponse(task, srv.SrcID, srv.ID)
// res.Rules = append(res.Rules, ar)
// }
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//type allAlertsResponse struct {
// Rules []*alertResponse `json:"rules"`
//}
//
//// KapacitorRulesID retrieves specific task
//func (s *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
// tid := httprouter.GetParamFromContext(ctx, "tid")
//
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
//
// // Check if the rule exists within scope
// task, err := c.Get(ctx, tid)
// if err != nil {
// if err == chronograf.ErrAlertNotFound {
// notFound(w, id, s.Logger)
// return
// }
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// res := newAlertResponse(task, srv.SrcID, srv.ID)
// encodeJSON(w, http.StatusOK, res, s.Logger)
//}
//
//// KapacitorRulesDelete proxies DELETE to kapacitor
//func (s *Service) KapacitorRulesDelete(w http.ResponseWriter, r *http.Request) {
// id, err := paramID("kid", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// srcID, err := paramID("id", r)
// if err != nil {
// Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
// return
// }
//
// ctx := r.Context()
// srv, err := s.Store.Servers(ctx).Get(ctx, id)
// if err != nil || srv.SrcID != srcID {
// notFound(w, id, s.Logger)
// return
// }
//
// c := kapa.NewClient(srv.URL, srv.Username, srv.Password, srv.InsecureSkipVerify)
//
// tid := httprouter.GetParamFromContext(ctx, "tid")
// // Check if the rule is linked to this server and kapacitor
// if _, err := c.Get(ctx, tid); err != nil {
// if err == chronograf.ErrAlertNotFound {
// notFound(w, id, s.Logger)
// return
// }
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
// if err := c.Delete(ctx, c.Href(tid)); err != nil {
// Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
// return
// }
//
// w.WriteHeader(http.StatusNoContent)
//}

View File

@ -1,265 +0,0 @@
package server_test
//const tickScript = `
//stream
// |from()
// .measurement('cpu')
// |alert()
// .crit(lambda: "usage_idle" < 10)
// .log('/tmp/alert')
//`
//
//func TestValidRuleRequest(t *testing.T) {
// tests := []struct {
// name string
// rule chronograf.AlertRule
// wantErr bool
// }{
// {
// name: "No every with functions",
// rule: chronograf.AlertRule{
// Query: &chronograf.QueryConfig{
// Fields: []chronograf.Field{
// {
// Value: "max",
// Type: "func",
// Args: []chronograf.Field{
// {
// Value: "oldmanpeabody",
// Type: "field",
// },
// },
// },
// },
// },
// },
// wantErr: true,
// },
// {
// name: "With every",
// rule: chronograf.AlertRule{
// Every: "10s",
// Query: &chronograf.QueryConfig{
// Fields: []chronograf.Field{
// {
// Value: "max",
// Type: "func",
// Args: []chronograf.Field{
// {
// Value: "oldmanpeabody",
// Type: "field",
// },
// },
// },
// },
// },
// },
// },
// {
// name: "No query config",
// rule: chronograf.AlertRule{},
// wantErr: true,
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// if err := server.ValidRuleRequest(tt.rule); (err != nil) != tt.wantErr {
// t.Errorf("ValidRuleRequest() error = %v, wantErr %v", err, tt.wantErr)
// }
// })
// }
//}
//
//func Test_KapacitorRulesGet(t *testing.T) {
// kapaTests := []struct {
// name string
// requestPath string
// mockAlerts []chronograf.AlertRule
// expected []chronograf.AlertRule
// }{
// {
// name: "basic",
// requestPath: "/chronograf/v1/sources/1/kapacitors/1/rules",
// mockAlerts: []chronograf.AlertRule{
// {
// ID: "cpu_alert",
// Name: "cpu_alert",
// Status: "enabled",
// Type: "stream",
// DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}},
// TICKScript: tickScript,
// },
// },
// expected: []chronograf.AlertRule{
// {
// ID: "cpu_alert",
// Name: "cpu_alert",
// Status: "enabled",
// Type: "stream",
// DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}},
// TICKScript: tickScript,
// AlertNodes: chronograf.AlertNodes{
// Posts: []*chronograf.Post{},
// TCPs: []*chronograf.TCP{},
// Email: []*chronograf.Email{},
// Exec: []*chronograf.Exec{},
// Log: []*chronograf.Log{},
// VictorOps: []*chronograf.VictorOps{},
// PagerDuty: []*chronograf.PagerDuty{},
// PagerDuty2: []*chronograf.PagerDuty{},
// Pushover: []*chronograf.Pushover{},
// Sensu: []*chronograf.Sensu{},
// Slack: []*chronograf.Slack{},
// Telegram: []*chronograf.Telegram{},
// HipChat: []*chronograf.HipChat{},
// Alerta: []*chronograf.Alerta{},
// OpsGenie: []*chronograf.OpsGenie{},
// OpsGenie2: []*chronograf.OpsGenie{},
// Talk: []*chronograf.Talk{},
// Kafka: []*chronograf.Kafka{},
// },
// },
// },
// },
// }
//
// for _, test := range kapaTests {
// test := test // needed to avoid data race
// t.Run(test.name, func(t *testing.T) {
// t.Parallel()
//
// // setup mock kapa API
// kapaSrv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
// params := r.URL.Query()
// limit, err := strconv.Atoi(params.Get("limit"))
// if err != nil {
// rw.WriteHeader(http.StatusBadRequest)
// return
// }
// offset, err := strconv.Atoi(params.Get("offset"))
// if err != nil {
// rw.WriteHeader(http.StatusBadRequest)
// return
// }
//
// tsks := []map[string]interface{}{}
// for _, task := range test.mockAlerts {
// tsks = append(tsks, map[string]interface{}{
// "id": task.ID,
// "script": tickScript,
// "status": "enabled",
// "type": "stream",
// "dbrps": []chronograf.DBRP{
// {
// DB: "telegraf",
// RP: "autogen",
// },
// },
// "link": map[string]interface{}{
// "rel": "self",
// "href": "/kapacitor/v1/tasks/cpu_alert",
// },
// })
// }
//
// var tasks map[string]interface{}
//
// if offset >= len(tsks) {
// tasks = map[string]interface{}{
// "tasks": []map[string]interface{}{},
// }
// } else if limit+offset > len(tsks) {
// tasks = map[string]interface{}{
// "tasks": tsks[offset:],
// }
// }
// //} else {
// //tasks = map[string]interface{}{
// //"tasks": tsks[offset : offset+limit],
// //}
// //}
//
// err = json.NewEncoder(rw).Encode(&tasks)
// if err != nil {
// t.Error("Failed to encode JSON. err:", err)
// }
// }))
// defer kapaSrv.Close()
//
// // setup mock service and test logger
// testLogger := mocks.TestLogger{}
// svc := &server.Service{
// Store: &mocks.Store{
// SourcesStore: &mocks.SourcesStore{
// GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
// return chronograf.Source{
// ID: ID,
// InsecureSkipVerify: true,
// }, nil
// },
// },
// ServersStore: &mocks.ServersStore{
// GetF: func(ctx context.Context, ID int) (chronograf.Server, error) {
// return chronograf.Server{
// SrcID: ID,
// URL: kapaSrv.URL,
// }, nil
// },
// },
// },
// Logger: &testLogger,
// }
//
// // setup request and response recorder
// req := httptest.NewRequest("GET", test.requestPath, strings.NewReader(""))
// rr := httptest.NewRecorder()
//
// // setup context and request params
// bg := context.Background()
// params := httprouter.Params{
// {
// Key: "id",
// Value: "1",
// },
// {
// Key: "kid",
// Value: "1",
// },
// }
// ctx := httprouter.WithParams(bg, params)
// req = req.WithContext(ctx)
//
// // invoke KapacitorRulesGet endpoint
// svc.KapacitorRulesGet(rr, req)
//
// // destructure response
// frame := struct {
// Rules []struct {
// chronograf.AlertRule
// Links json.RawMessage `json:"links"`
// } `json:"rules"`
// }{}
//
// resp := rr.Result()
//
// err := json.NewDecoder(resp.Body).Decode(&frame)
// if err != nil {
// t.Fatal("Err decoding kapa rule response: err:", err)
// }
//
// actual := make([]chronograf.AlertRule, len(frame.Rules))
//
// for i := range frame.Rules {
// actual[i] = frame.Rules[i].AlertRule
// }
//
// if resp.StatusCode != http.StatusOK {
// t.Fatal("Expected HTTP 200 OK but got", resp.Status)
// }
//
// if !cmp.Equal(test.expected, actual) {
// t.Fatalf("%q - Alert rules differ! diff:\n%s\n", test.name, cmp.Diff(test.expected, actual))
// }
// })
// }
//}

View File

@ -1,119 +0,0 @@
package server
import (
"fmt"
"net/http"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
)
// link is a hypermedia reference rendered alongside a resource so clients
// can navigate the API.
type link struct {
	Href string `json:"href"` // URL of the referenced resource
	Rel  string `json:"rel"`  // relationship of the reference, e.g. "self"
}
// layoutResponse is a chronograf.Layout decorated with a self link for
// JSON responses.
type layoutResponse struct {
	chronograf.Layout
	Link link `json:"link"`
}
// newLayoutResponse wraps a layout in a layoutResponse with a self link.
// It also normalizes every cell in place so that JSON encoding never emits
// null: a nil Axes map becomes an empty map with "x", "y" and "y2" entries,
// missing axes get empty Bounds, and a nil CellColors becomes an empty slice.
func newLayoutResponse(layout chronograf.Layout) layoutResponse {
	const httpAPILayouts = "/chronograf/v1/layouts"
	axisKeys := []string{"x", "y", "y2"}
	for i := range layout.Cells {
		cell := &layout.Cells[i]
		if cell.Axes == nil {
			cell.Axes = make(map[string]chronograf.Axis, len(axisKeys))
		}
		if cell.CellColors == nil {
			cell.CellColors = []chronograf.CellColor{}
		}
		for _, key := range axisKeys {
			if _, ok := cell.Axes[key]; !ok {
				cell.Axes[key] = chronograf.Axis{
					Bounds: []string{},
				}
			}
		}
	}
	return layoutResponse{
		Layout: layout,
		Link: link{
			Href: fmt.Sprintf("%s/%s", httpAPILayouts, layout.ID),
			Rel:  "self",
		},
	}
}
// getLayoutsResponse is the JSON envelope returned by the layouts listing
// endpoint.
type getLayoutsResponse struct {
	Layouts []layoutResponse `json:"layouts"`
}
// Layouts retrieves all layouts from store, optionally filtered by the
// "app" and "measurement" query parameters, and with duplicate layouts
// (same measurement + ID) removed.
func (s *Service) Layouts(w http.ResponseWriter, r *http.Request) {
	// Construct a filter sieve for both applications and measurements
	filtered := map[string]bool{}
	for _, a := range r.URL.Query()["app"] {
		filtered[a] = true
	}
	for _, m := range r.URL.Query()["measurement"] {
		filtered[m] = true
	}
	ctx := r.Context()
	layouts, err := s.Store.Layouts(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusInternalServerError, "Error loading layouts", s.Logger)
		return
	}
	filter := func(layout *chronograf.Layout) bool {
		// If the length of the filter is zero then all values are acceptable.
		if len(filtered) == 0 {
			return true
		}
		// If filter contains either measurement or application
		return filtered[layout.Measurement] || filtered[layout.Application]
	}
	res := getLayoutsResponse{
		Layouts: []layoutResponse{},
	}
	seen := make(map[string]bool)
	for _, layout := range layouts {
		// remove duplicates
		key := layout.Measurement + layout.ID
		if seen[key] {
			continue
		}
		// BUG FIX: the original never inserted into seen, so the duplicate
		// check above could never fire.
		seen[key] = true
		// filter for data that belongs to provided application or measurement
		if filter(&layout) {
			res.Layouts = append(res.Layouts, newLayoutResponse(layout))
		}
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// LayoutsID retrieves layout with ID from store
func (s *Service) LayoutsID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id := httprouter.GetParamFromContext(ctx, "id")
	layout, err := s.Store.Layouts(ctx).Get(ctx, id)
	if err != nil {
		// Any lookup failure is reported to the client as a missing layout.
		Error(w, http.StatusNotFound, fmt.Sprintf("ID %s not found", id), s.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, newLayoutResponse(layout), s.Logger)
}

View File

@ -1,186 +0,0 @@
package server_test
import (
"context"
"encoding/json"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/server"
)
// Test_Layouts exercises the Layouts handler end-to-end against a mock
// layouts store, covering app filtering and zero-value axis normalization.
// Composite literals are simplified (gofmt -s): redundant element types
// removed. A guard is added before indexing Layouts[0] so a failing
// handler produces a clear fatal message instead of a panic.
func Test_Layouts(t *testing.T) {
	layoutTests := []struct {
		name       string
		expected   chronograf.Layout
		allLayouts []chronograf.Layout
		focusedApp string // should filter all layouts to this app only
		shouldErr  bool
	}{
		{
			"empty layout",
			chronograf.Layout{},
			[]chronograf.Layout{},
			"",
			false,
		},
		{
			"several layouts",
			chronograf.Layout{
				ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
				Application: "influxdb",
				Measurement: "influxdb",
			},
			[]chronograf.Layout{
				{
					ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
					Application: "influxdb",
					Measurement: "influxdb",
				},
			},
			"",
			false,
		},
		{
			"filtered app",
			chronograf.Layout{
				ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
				Application: "influxdb",
				Measurement: "influxdb",
			},
			[]chronograf.Layout{
				{
					ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
					Application: "influxdb",
					Measurement: "influxdb",
				},
				{
					ID:          "b020101b-ea6b-4c8c-9f0e-db0ba501f4ef",
					Application: "chronograf",
					Measurement: "chronograf",
				},
			},
			"influxdb",
			false,
		},
		{
			"axis zero values",
			chronograf.Layout{
				ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
				Application: "influxdb",
				Measurement: "influxdb",
				Cells: []chronograf.Cell{
					{
						X:          0,
						Y:          0,
						W:          4,
						H:          4,
						I:          "3b0e646b-2ca3-4df2-95a5-fd80915459dd",
						Name:       "A Graph",
						CellColors: []chronograf.CellColor{},
						Axes: map[string]chronograf.Axis{
							"x": {
								Bounds: []string{},
							},
							"y": {
								Bounds: []string{},
							},
							"y2": {
								Bounds: []string{},
							},
						},
					},
				},
			},
			[]chronograf.Layout{
				{
					ID:          "d20a21c8-69f1-4780-90fe-e69f5e4d138c",
					Application: "influxdb",
					Measurement: "influxdb",
					Cells: []chronograf.Cell{
						{
							X:          0,
							Y:          0,
							W:          4,
							H:          4,
							I:          "3b0e646b-2ca3-4df2-95a5-fd80915459dd",
							CellColors: []chronograf.CellColor{},
							Name:       "A Graph",
						},
					},
				},
			},
			"",
			false,
		},
	}
	for _, test := range layoutTests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			// setup mock chronograf.Service and mock logger
			lg := &mocks.TestLogger{}
			svc := server.Service{
				Store: &mocks.Store{LayoutsStore: &mocks.LayoutsStore{
					AllF: func(ctx context.Context) ([]chronograf.Layout, error) {
						if len(test.allLayouts) == 0 {
							return []chronograf.Layout{
								test.expected,
							}, nil
						} else {
							return test.allLayouts, nil
						}
					},
				},
				},
				Logger: lg,
			}
			// setup mock request and response
			rr := httptest.NewRecorder()
			reqURL := url.URL{
				Path: "/chronograf/v1/layouts",
			}
			params := reqURL.Query()
			// add query params required by test
			if test.focusedApp != "" {
				params.Add("app", test.focusedApp)
			}
			// re-inject query params
			reqURL.RawQuery = params.Encode()
			req := httptest.NewRequest("GET", reqURL.RequestURI(), strings.NewReader(""))
			// invoke handler for layouts endpoint
			svc.Layouts(rr, req)
			// create a throwaway frame to unwrap Layouts
			respFrame := struct {
				Layouts []struct {
					chronograf.Layout
					Link interface{} `json:"-"`
				} `json:"layouts"`
			}{}
			// decode resp into respFrame
			resp := rr.Result()
			if err := json.NewDecoder(resp.Body).Decode(&respFrame); err != nil {
				t.Fatalf("%q - Error unmarshalling JSON: err: %s", test.name, err.Error())
			}
			// fail loudly (instead of panicking on Layouts[0]) if nothing came back
			if len(respFrame.Layouts) == 0 {
				t.Fatalf("%q - Expected at least one layout in response", test.name)
			}
			// compare actual and expected
			if !cmp.Equal(test.expected, respFrame.Layouts[0].Layout) {
				t.Fatalf("%q - Expected layouts to be equal: diff:\n\t%s", test.name, cmp.Diff(test.expected, respFrame.Layouts[0].Layout))
			}
		})
	}
}

View File

@ -1,59 +0,0 @@
package server
import (
"errors"
"net/url"
)
// getFluxLinksResponse lists the Flux-related endpoints exposed to clients.
type getFluxLinksResponse struct {
	AST         string `json:"ast"`         // Location of the Flux AST endpoint
	Self        string `json:"self"`        // Location of this resource
	Suggestions string `json:"suggestions"` // Location of the Flux suggestions endpoint
}
// getConfigLinksResponse lists the endpoints of the global application config.
type getConfigLinksResponse struct {
	Self string `json:"self"` // Location of the whole global application configuration
	Auth string `json:"auth"` // Location of the auth section of the global application configuration
}
// getOrganizationConfigLinksResponse lists the endpoints of a single
// organization's configuration.
type getOrganizationConfigLinksResponse struct {
	Self      string `json:"self"`      // Location of the organization configuration
	LogViewer string `json:"logViewer"` // Location of the organization-specific log viewer configuration
}
// getExternalLinksResponse carries operator-configured external links that
// the client renders in its UI.
type getExternalLinksResponse struct {
	StatusFeed  *string      `json:"statusFeed,omitempty"` // Location of the a JSON Feed for client's Status page News Feed
	CustomLinks []CustomLink `json:"custom,omitempty"`     // Any custom external links for client's User menu
}
// CustomLink is a named external URL returned in the server's routes
// response, within ExternalLinks.
type CustomLink struct {
	Name string `json:"name"`
	URL  string `json:"url"`
}
// NewCustomLinks transforms `--custom-link` CLI flag data or `CUSTOM_LINKS` ENV
// var data into a data structure that the Chronograf client will expect.
// An empty name or URL, or an unparseable URL, yields an error. Iteration
// order over the map is not deterministic, so neither is the result order.
func NewCustomLinks(links map[string]string) ([]CustomLink, error) {
	customLinks := make([]CustomLink, 0, len(links))
	for name, link := range links {
		switch {
		case name == "":
			return nil, errors.New("customLink missing key for Name")
		case link == "":
			return nil, errors.New("customLink missing value for URL")
		}
		// Reject values that are not syntactically valid URLs.
		if _, err := url.Parse(link); err != nil {
			return nil, err
		}
		customLinks = append(customLinks, CustomLink{Name: name, URL: link})
	}
	return customLinks, nil
}

View File

@ -1,60 +0,0 @@
package server
import (
"reflect"
"testing"
)
// TestNewCustomLinks covers the happy path and each validation failure of
// NewCustomLinks. Cases now run as named subtests via t.Run so failures
// are attributable and individually runnable with -run.
func TestNewCustomLinks(t *testing.T) {
	tests := []struct {
		name    string
		args    map[string]string
		want    []CustomLink
		wantErr bool
	}{
		{
			name: "Unknown error in NewCustomLinks",
			args: map[string]string{
				"cubeapple": "https://cube.apple",
			},
			want: []CustomLink{
				{
					Name: "cubeapple",
					URL:  "https://cube.apple",
				},
			},
		},
		{
			name: "CustomLink missing Name",
			args: map[string]string{
				"": "https://cube.apple",
			},
			wantErr: true,
		},
		{
			name: "CustomLink missing URL",
			args: map[string]string{
				"cubeapple": "",
			},
			wantErr: true,
		},
		{
			name: "Missing protocol scheme",
			args: map[string]string{
				"cubeapple": ":k%8a#",
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			got, err := NewCustomLinks(tt.args)
			if (err != nil) != tt.wantErr {
				t.Errorf("%q. NewCustomLinks() error = %v, wantErr %v", tt.name, err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("%q. NewCustomLinks() = %v, want %v", tt.name, got, tt.want)
			}
		})
	}
}

View File

@ -1,63 +0,0 @@
package server
import (
"net/http"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
)
// statusWriter wraps an http.ResponseWriter to capture the status code it
// writes, and optionally forwards Flush calls to an underlying http.Flusher.
// (The original comment called this "statusWriterFlusher"; the type is
// named statusWriter.)
type statusWriter struct {
	http.ResponseWriter
	Flusher http.Flusher // non-nil when the wrapped writer supports flushing
	status  int          // last status code passed to WriteHeader
}
// WriteHeader records the status code and forwards it to the wrapped writer.
func (w *statusWriter) WriteHeader(status int) {
	w.status = status
	w.ResponseWriter.WriteHeader(status)
}
// Status returns the last status code recorded by WriteHeader (zero if
// WriteHeader was never called).
func (w *statusWriter) Status() int { return w.status }
// Flush forwards to the underlying http.Flusher when one is present. The
// wrapper must implement http.Flusher because otherwise data written to an
// HTTP chunked transfer response is silently buffered. This was discovered
// when proxying kapacitor chunked logs.
func (w *statusWriter) Flush() {
	if w.Flusher != nil {
		w.Flusher.Flush()
	}
}
// Logger is middleware that logs the request at debug level on arrival and
// logs the response status and elapsed time at info level once the wrapped
// handler returns.
func Logger(logger chronograf.Logger, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		logger.WithField("component", "server").
			WithField("remote_addr", r.RemoteAddr).
			WithField("method", r.Method).
			WithField("url", r.URL).
			Debug("Request")
		// Wrap the writer so the response status can be reported, and so
		// Flush still reaches the underlying writer when supported.
		sw := &statusWriter{ResponseWriter: w}
		if f, ok := w.(http.Flusher); ok {
			sw.Flusher = f
		}
		next.ServeHTTP(sw, r)
		elapsed := time.Since(start)
		logger.
			WithField("component", "server").
			WithField("remote_addr", r.RemoteAddr).
			WithField("method", r.Method).
			WithField("response_time", elapsed.String()).
			WithField("status", sw.Status()).
			Info("Response: ", http.StatusText(sw.Status()))
	})
}

View File

@ -1,24 +0,0 @@
package server
import (
"net/http"
"path"
)
// Logout chooses the correct provider logout route and redirects to it.
// When no principal is present, or the principal's issuer has no registered
// route, the client is redirected to nextURL under basepath instead.
func Logout(nextURL, basepath string, routes AuthRoutes) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fallback := path.Join(basepath, nextURL)
		principal, err := getPrincipal(r.Context())
		if err != nil {
			http.Redirect(w, r, fallback, http.StatusTemporaryRedirect)
			return
		}
		route, ok := routes.Lookup(principal.Issuer)
		if !ok {
			http.Redirect(w, r, fallback, http.StatusTemporaryRedirect)
			return
		}
		http.Redirect(w, r, route.Logout, http.StatusTemporaryRedirect)
	}
}

View File

@ -1,264 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
)
// mapPrincipalToSuperAdmin reports whether the principal should be granted
// super-admin status: only auth0 principals qualify, and only when one of
// their (comma-separated, non-empty) groups matches the configured auth0
// super-admin provider group.
func (s *Service) mapPrincipalToSuperAdmin(p oauth2.Principal) bool {
	if p.Issuer != "auth0" {
		return false
	}
	for _, group := range strings.Split(p.Group, ",") {
		if group != "" && group == s.SuperAdminProviderGroups.auth0 {
			return true
		}
	}
	return false
}
// mapPrincipalToRoles resolves the organization roles granted to an
// authenticated principal by evaluating every stored mapping against it.
// Each matching mapping contributes at most one role per organization (the
// organization's default role); mappings whose organization cannot be
// fetched are skipped.
func (s *Service) mapPrincipalToRoles(ctx context.Context, p oauth2.Principal) ([]chronograf.Role, error) {
	mappings, err := s.Store.Mappings(ctx).All(ctx)
	if err != nil {
		return nil, err
	}
	roles := []chronograf.Role{}
MappingsLoop:
	for _, mapping := range mappings {
		if applyMapping(mapping, p) {
			org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &mapping.Organization})
			if err != nil {
				// Mapping points at a missing organization; ignore it.
				continue MappingsLoop
			}
			for _, role := range roles {
				if role.Organization == org.ID {
					// A role in this organization was already granted.
					continue MappingsLoop
				}
			}
			roles = append(roles, chronograf.Role{Organization: org.ID, Name: org.DefaultRole})
		}
	}
	return roles, nil
}
// applyMapping reports whether mapping m matches principal p. The mapping's
// provider must be the wildcard or the principal's issuer, its scheme must
// be the wildcard or "oauth2", and its provider organization must be the
// wildcard or one of the principal's comma-separated groups.
func applyMapping(m chronograf.Mapping, p oauth2.Principal) bool {
	if m.Provider != chronograf.MappingWildcard && m.Provider != p.Issuer {
		return false
	}
	if m.Scheme != chronograf.MappingWildcard && m.Scheme != "oauth2" {
		return false
	}
	if m.ProviderOrganization == chronograf.MappingWildcard {
		return true
	}
	return matchGroup(m.ProviderOrganization, strings.Split(p.Group, ","))
}
// matchGroup reports whether match is equal to any element of groups.
func matchGroup(match string, groups []string) bool {
	for i := range groups {
		if groups[i] == match {
			return true
		}
	}
	return false
}
// mappingsRequest is the request payload for creating or updating a mapping.
type mappingsRequest chronograf.Mapping
// Valid determines if a mapping request is valid: provider, scheme and
// provider organization must all be non-empty.
func (m *mappingsRequest) Valid() error {
	switch {
	case m.Provider == "":
		return fmt.Errorf("mapping must specify provider")
	case m.Scheme == "":
		return fmt.Errorf("mapping must specify scheme")
	case m.ProviderOrganization == "":
		return fmt.Errorf("mapping must specify group")
	}
	return nil
}
// mappingResponse is a chronograf.Mapping decorated with a self link for
// JSON responses.
type mappingResponse struct {
	Links selfLinks `json:"links"`
	chronograf.Mapping
}
// newMappingResponse wraps a mapping with its self link.
func newMappingResponse(m chronograf.Mapping) *mappingResponse {
	self := fmt.Sprintf("/chronograf/v1/mappings/%s", m.ID)
	return &mappingResponse{
		Links:   selfLinks{Self: self},
		Mapping: m,
	}
}
// mappingsResponse is the JSON envelope returned by the mappings listing
// endpoint.
type mappingsResponse struct {
	Links    selfLinks          `json:"links"`
	Mappings []*mappingResponse `json:"mappings"`
}
// newMappingsResponse wraps a list of mappings with collection links.
// The Mappings slice is always non-nil so it encodes as [] rather than null.
func newMappingsResponse(ms []chronograf.Mapping) *mappingsResponse {
	res := &mappingsResponse{
		Links:    selfLinks{Self: "/chronograf/v1/mappings"},
		Mappings: make([]*mappingResponse, 0, len(ms)),
	}
	for _, m := range ms {
		res.Mappings = append(res.Mappings, newMappingResponse(m))
	}
	return res
}
// Mappings retrieves all mappings
func (s *Service) Mappings(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	all, err := s.Store.Mappings(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusInternalServerError, "failed to retrieve mappings from database", s.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, newMappingsResponse(all), s.Logger)
}
// NewMapping adds a new mapping. The request body must decode to a valid
// mappingsRequest and reference an existing organization; on success the
// created mapping is returned with HTTP 201 and a Location header.
func (s *Service) NewMapping(w http.ResponseWriter, r *http.Request) {
	var req mappingsRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.Valid(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	ctx := r.Context()
	// validate that the organization exists
	if !s.organizationExists(ctx, req.Organization) {
		invalidData(w, fmt.Errorf("organization does not exist"), s.Logger)
		return
	}
	added, err := s.Store.Mappings(ctx).Add(ctx, &chronograf.Mapping{
		Organization:         req.Organization,
		Scheme:               req.Scheme,
		Provider:             req.Provider,
		ProviderOrganization: req.ProviderOrganization,
	})
	if err != nil {
		Error(w, http.StatusInternalServerError, "failed to add mapping to database", s.Logger)
		return
	}
	res := newMappingResponse(*added)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusCreated, res, s.Logger)
}
// UpdateMapping updates a mapping. The request body must decode to a valid
// mappingsRequest (including the mapping ID) and reference an existing
// organization; on success the updated mapping is returned with HTTP 200.
func (s *Service) UpdateMapping(w http.ResponseWriter, r *http.Request) {
	var req mappingsRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.Valid(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	ctx := r.Context()
	// validate that the organization exists
	if !s.organizationExists(ctx, req.Organization) {
		invalidData(w, fmt.Errorf("organization does not exist"), s.Logger)
		return
	}
	mapping := &chronograf.Mapping{
		ID:                   req.ID,
		Organization:         req.Organization,
		Scheme:               req.Scheme,
		Provider:             req.Provider,
		ProviderOrganization: req.ProviderOrganization,
	}
	if err := s.Store.Mappings(ctx).Update(ctx, mapping); err != nil {
		Error(w, http.StatusInternalServerError, "failed to update mapping in database", s.Logger)
		return
	}
	res := newMappingResponse(*mapping)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// RemoveMapping removes the mapping identified by the "id" URL parameter,
// responding 404 when it does not exist and 204 on success.
func (s *Service) RemoveMapping(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id := httprouter.GetParamFromContext(ctx, "id")
	mapping, err := s.Store.Mappings(ctx).Get(ctx, id)
	switch {
	case err == chronograf.ErrMappingNotFound:
		Error(w, http.StatusNotFound, err.Error(), s.Logger)
		return
	case err != nil:
		Error(w, http.StatusInternalServerError, "failed to retrieve mapping from database", s.Logger)
		return
	}
	if err := s.Store.Mappings(ctx).Delete(ctx, mapping); err != nil {
		Error(w, http.StatusInternalServerError, "failed to remove mapping from database", s.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// organizationExists reports whether an organization with the given ID can
// be fetched from the store.
func (s *Service) organizationExists(ctx context.Context, orgID string) bool {
	_, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &orgID})
	return err == nil
}

View File

@ -1,356 +0,0 @@
package server
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"net/http/httptest"
"testing"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// TestMappings_All verifies that Service.Mappings returns every stored
// mapping wrapped with hypermedia links, checking status code, content type
// and the JSON body against a mock store.
func TestMappings_All(t *testing.T) {
	type fields struct {
		MappingsStore chronograf.MappingsStore
	}
	type wants struct {
		statusCode  int
		contentType string
		body        string
	}
	tests := []struct {
		name   string
		fields fields
		wants  wants
	}{
		{
			name: "get all mappings",
			fields: fields{
				MappingsStore: &mocks.MappingsStore{
					AllF: func(ctx context.Context) ([]chronograf.Mapping, error) {
						return []chronograf.Mapping{
							{
								Organization:         "0",
								Provider:             chronograf.MappingWildcard,
								Scheme:               chronograf.MappingWildcard,
								ProviderOrganization: chronograf.MappingWildcard,
							},
						}, nil
					},
				},
			},
			wants: wants{
				statusCode:  200,
				contentType: "application/json",
				body: `{"links":{"self":"/chronograf/v1/mappings"},"mappings":[{"links":{"self":"/chronograf/v1/mappings/"},"id":"","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}]}`,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					MappingsStore: tt.fields.MappingsStore,
				},
				Logger: &chronograf.NoopLogger{},
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest("GET", "http://any.url", nil)
			s.Mappings(w, r)
			resp := w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wants.statusCode {
				t.Errorf("%q. Mappings() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode)
			}
			if tt.wants.contentType != "" && content != tt.wants.contentType {
				t.Errorf("%q. Mappings() = %v, want %v", tt.name, content, tt.wants.contentType)
			}
			// jsonEqual tolerates key-order differences in the JSON body.
			if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
				t.Errorf("%q. Mappings() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body)
			}
		})
	}
}
// TestMappings_Add verifies that Service.NewMapping decodes the request
// body, validates the target organization, stores the mapping, and responds
// 201 with a Location link and the created resource.
func TestMappings_Add(t *testing.T) {
	type fields struct {
		MappingsStore      chronograf.MappingsStore
		OrganizationsStore chronograf.OrganizationsStore
	}
	type args struct {
		mapping *chronograf.Mapping
	}
	type wants struct {
		statusCode  int
		contentType string
		body        string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "create new mapping",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "0",
							Name:        "The Gnarly Default",
							DefaultRole: roles.ViewerRoleName,
						}, nil
					},
				},
				MappingsStore: &mocks.MappingsStore{
					AddF: func(ctx context.Context, m *chronograf.Mapping) (*chronograf.Mapping, error) {
						// Simulate the store assigning an ID on insert.
						m.ID = "0"
						return m, nil
					},
				},
			},
			args: args{
				mapping: &chronograf.Mapping{
					Organization:         "0",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
			wants: wants{
				statusCode:  201,
				contentType: "application/json",
				body: `{"links":{"self":"/chronograf/v1/mappings/0"},"id":"0","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}`,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					MappingsStore:      tt.fields.MappingsStore,
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: &chronograf.NoopLogger{},
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest("GET", "http://any.url", nil)
			buf, _ := json.Marshal(tt.args.mapping)
			r.Body = ioutil.NopCloser(bytes.NewReader(buf))
			s.NewMapping(w, r)
			resp := w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wants.statusCode {
				t.Errorf("%q. Add() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode)
			}
			if tt.wants.contentType != "" && content != tt.wants.contentType {
				t.Errorf("%q. Add() = %v, want %v", tt.name, content, tt.wants.contentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
				t.Errorf("%q. Add() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body)
			}
		})
	}
}
// TestMappings_Update verifies that Service.UpdateMapping decodes the
// request body, validates the target organization, updates the mapping, and
// responds 200 with the updated resource.
// Fix: the failure messages previously said "Add()" (copy-pasted from
// TestMappings_Add), which misattributed failures; they now name
// UpdateMapping().
func TestMappings_Update(t *testing.T) {
	type fields struct {
		MappingsStore      chronograf.MappingsStore
		OrganizationsStore chronograf.OrganizationsStore
	}
	type args struct {
		mapping *chronograf.Mapping
	}
	type wants struct {
		statusCode  int
		contentType string
		body        string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "update new mapping",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "0",
							Name:        "The Gnarly Default",
							DefaultRole: roles.ViewerRoleName,
						}, nil
					},
				},
				MappingsStore: &mocks.MappingsStore{
					UpdateF: func(ctx context.Context, m *chronograf.Mapping) error {
						return nil
					},
				},
			},
			args: args{
				mapping: &chronograf.Mapping{
					ID:                   "1",
					Organization:         "0",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
			wants: wants{
				statusCode:  200,
				contentType: "application/json",
				body: `{"links":{"self":"/chronograf/v1/mappings/1"},"id":"1","organizationId":"0","provider":"*","scheme":"*","providerOrganization":"*"}`,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					MappingsStore:      tt.fields.MappingsStore,
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: &chronograf.NoopLogger{},
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest("GET", "http://any.url", nil)
			buf, _ := json.Marshal(tt.args.mapping)
			r.Body = ioutil.NopCloser(bytes.NewReader(buf))
			r = r.WithContext(httprouter.WithParams(
				context.Background(),
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.args.mapping.ID,
					},
				}))
			s.UpdateMapping(w, r)
			resp := w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wants.statusCode {
				t.Errorf("%q. UpdateMapping() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode)
			}
			if tt.wants.contentType != "" && content != tt.wants.contentType {
				t.Errorf("%q. UpdateMapping() = %v, want %v", tt.name, content, tt.wants.contentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
				t.Errorf("%q. UpdateMapping() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body)
			}
		})
	}
}
// TestMappings_Remove verifies that Service.RemoveMapping looks up the
// mapping by the "id" URL parameter, deletes it, and responds 204 with an
// empty body.
func TestMappings_Remove(t *testing.T) {
	type fields struct {
		MappingsStore chronograf.MappingsStore
	}
	type args struct {
		id string
	}
	type wants struct {
		statusCode  int
		contentType string
		body        string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "remove mapping",
			fields: fields{
				MappingsStore: &mocks.MappingsStore{
					GetF: func(ctx context.Context, id string) (*chronograf.Mapping, error) {
						return &chronograf.Mapping{
							ID:                   "1",
							Organization:         "0",
							Provider:             "*",
							Scheme:               "*",
							ProviderOrganization: "*",
						}, nil
					},
					DeleteF: func(ctx context.Context, m *chronograf.Mapping) error {
						return nil
					},
				},
			},
			args: args{},
			wants: wants{
				statusCode: 204,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					MappingsStore: tt.fields.MappingsStore,
				},
				Logger: &chronograf.NoopLogger{},
			}
			w := httptest.NewRecorder()
			r := httptest.NewRequest("GET", "http://any.url", nil)
			r = r.WithContext(httprouter.WithParams(
				context.Background(),
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.args.id,
					},
				}))
			s.RemoveMapping(w, r)
			resp := w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wants.statusCode {
				t.Errorf("%q. Remove() = %v, want %v", tt.name, resp.StatusCode, tt.wants.statusCode)
			}
			if tt.wants.contentType != "" && content != tt.wants.contentType {
				t.Errorf("%q. Remove() = %v, want %v", tt.name, content, tt.wants.contentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq {
				t.Errorf("%q. Remove() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wants.body)
			}
		})
	}
}

View File

@ -1,400 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
"golang.org/x/net/context"
)
// meLinks holds the hypermedia links attached to a "me" response.
type meLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// meResponse describes the authenticated user along with the organizations
// they can access and the one they are currently scoped to.
type meResponse struct {
	*chronograf.User
	Links               meLinks                   `json:"links"`
	Organizations       []chronograf.Organization `json:"organizations"`
	CurrentOrganization *chronograf.Organization  `json:"currentOrganization,omitempty"`
}
// noAuthMeResponse is the minimal "me" payload returned when the server is
// running without authentication.
type noAuthMeResponse struct {
	Links meLinks `json:"links"`
}
// newNoAuthMeResponse builds the "me" payload used when auth is disabled.
func newNoAuthMeResponse() noAuthMeResponse {
	return noAuthMeResponse{Links: meLinks{Self: "/chronograf/v1/me"}}
}
// newMeResponse wraps a user in a meResponse. A nil user yields an empty
// response whose self link points at /chronograf/v1/me, indicating that
// authentication is not needed; otherwise the self link points at the
// user's resource within its organization.
func newMeResponse(usr *chronograf.User, org string) meResponse {
	var (
		base = "/chronograf/v1"
		name = "me"
	)
	if usr != nil {
		base = fmt.Sprintf("/chronograf/v1/organizations/%s/users", org)
		name = PathEscape(fmt.Sprintf("%d", usr.ID))
	}
	return meResponse{
		User:  usr,
		Links: meLinks{Self: fmt.Sprintf("%s/%s", base, name)},
	}
}
// getScheme returns the authentication scheme for the given context.
// TODO: This Scheme value is hard-coded temporarily since we only currently
// support OAuth2. This hard-coding should be removed whenever we add
// support for other authentication schemes.
func getScheme(ctx context.Context) (string, error) {
	const scheme = "oauth2"
	return scheme, nil
}
// getPrincipal extracts the oauth2 principal stored on the context,
// returning an error when none is present.
func getPrincipal(ctx context.Context) (oauth2.Principal, error) {
	if principal, ok := ctx.Value(oauth2.PrincipalKey).(oauth2.Principal); ok {
		return principal, nil
	}
	return oauth2.Principal{}, fmt.Errorf("token not found")
}
// getValidPrincipal extracts the principal from the context and additionally
// requires both Subject and Issuer to be set.
func getValidPrincipal(ctx context.Context) (oauth2.Principal, error) {
	p, err := getPrincipal(ctx)
	if err != nil {
		return p, err
	}
	if p.Subject == "" || p.Issuer == "" {
		return oauth2.Principal{}, fmt.Errorf("token not found")
	}
	return p, nil
}
// meRequest is the payload accepted by UpdateMe for switching the caller's
// current organization.
type meRequest struct {
	// Organization is the OrganizationID
	Organization string `json:"organization"`
}
// UpdateMe changes the user's current organization on the JWT and responds
// with the same semantics as Me. The flow is: validate the caller's token,
// decode the requested organization, verify the organization exists, verify
// the caller belongs to it (adding super admins on the fly), re-issue the
// token scoped to the new organization, and finally delegate to Me.
func (s *Service) UpdateMe(auth oauth2.Authenticator) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		serverCtx := serverContext(ctx)
		principal, err := auth.Validate(ctx, r)
		if err != nil {
			s.Logger.Error(fmt.Sprintf("Invalid principal: %v", err))
			Error(w, http.StatusForbidden, "invalid principal", s.Logger)
			return
		}
		var req meRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			invalidJSON(w, s.Logger)
			return
		}
		// validate that the organization exists
		org, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &req.Organization})
		if err != nil {
			Error(w, http.StatusBadRequest, err.Error(), s.Logger)
			return
		}
		// validate that user belongs to organization
		ctx = context.WithValue(ctx, organizations.ContextKey, req.Organization)
		p, err := getValidPrincipal(ctx)
		if err != nil {
			invalidData(w, err, s.Logger)
			return
		}
		if p.Organization == "" {
			// Principals without an organization are scoped to the default one.
			defaultOrg, err := s.Store.Organizations(serverCtx).DefaultOrganization(serverCtx)
			if err != nil {
				unknownErrorWithMessage(w, err, s.Logger)
				return
			}
			p.Organization = defaultOrg.ID
		}
		scheme, err := getScheme(ctx)
		if err != nil {
			invalidData(w, err, s.Logger)
			return
		}
		_, err = s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{
			Name:     &p.Subject,
			Provider: &p.Issuer,
			Scheme:   &scheme,
		})
		if err == chronograf.ErrUserNotFound {
			// If the user was not found, check to see if they are a super admin. If
			// they are, add them to the organization.
			u, err := s.Store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{
				Name:     &p.Subject,
				Provider: &p.Issuer,
				Scheme:   &scheme,
			})
			if err != nil {
				Error(w, http.StatusForbidden, err.Error(), s.Logger)
				return
			}
			if !u.SuperAdmin {
				// Since a user is not a part of this organization and not a super admin,
				// we should tell them that they are Forbidden (403) from accessing this resource
				Error(w, http.StatusForbidden, chronograf.ErrUserNotFound.Error(), s.Logger)
				return
			}
			// If the user is a super admin give them an admin role in the
			// requested organization.
			u.Roles = append(u.Roles, chronograf.Role{
				Organization: org.ID,
				Name:         org.DefaultRole,
			})
			if err := s.Store.Users(serverCtx).Update(serverCtx, u); err != nil {
				unknownErrorWithMessage(w, err, s.Logger)
				return
			}
		} else if err != nil {
			Error(w, http.StatusBadRequest, err.Error(), s.Logger)
			return
		}
		// TODO: change to principal.CurrentOrganization
		principal.Organization = req.Organization
		if err := auth.Authorize(ctx, w, principal); err != nil {
			Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
			return
		}
		ctx = context.WithValue(ctx, oauth2.PrincipalKey, principal)
		s.Me(w, r.WithContext(ctx))
	}
}
// Me does a findOrCreate based on the username in the context.
//
// With auth disabled it returns an empty "no auth" user. Otherwise it looks
// the principal's user up in the store; an existing user gets their
// SuperAdmin status refreshed from the principal mapping and their
// organizations returned. A missing user is created on the fly (the OAuth
// login path), provided the principal maps to at least one role or
// SuperAdmin status.
func (s *Service) Me(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	if !s.UseAuth {
		// If there's no authentication, return an empty user
		res := newNoAuthMeResponse()
		encodeJSON(w, http.StatusOK, res, s.Logger)
		return
	}
	p, err := getValidPrincipal(ctx)
	if err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	scheme, err := getScheme(ctx)
	if err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	ctx = context.WithValue(ctx, organizations.ContextKey, p.Organization)
	// serverCtx bypasses organization scoping for store lookups.
	serverCtx := serverContext(ctx)
	defaultOrg, err := s.Store.Organizations(serverCtx).DefaultOrganization(serverCtx)
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	// A principal with no organization falls back to the default organization.
	if p.Organization == "" {
		p.Organization = defaultOrg.ID
	}
	usr, err := s.Store.Users(serverCtx).Get(serverCtx, chronograf.UserQuery{
		Name:     &p.Subject,
		Provider: &p.Issuer,
		Scheme:   &scheme,
	})
	if err != nil && err != chronograf.ErrUserNotFound {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	// user exists
	if usr != nil {
		// Promote to SuperAdmin if the provider mapping says so; never demote.
		superAdmin := s.mapPrincipalToSuperAdmin(p)
		if superAdmin && !usr.SuperAdmin {
			usr.SuperAdmin = superAdmin
			err := s.Store.Users(serverCtx).Update(serverCtx, usr)
			if err != nil {
				unknownErrorWithMessage(w, err, s.Logger)
				return
			}
		}
		currentOrg, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization})
		if err == chronograf.ErrOrganizationNotFound {
			// The intent is to force the user to go through another auth flow
			Error(w, http.StatusForbidden, "user's current organization was not found", s.Logger)
			return
		}
		if err != nil {
			unknownErrorWithMessage(w, err, s.Logger)
			return
		}
		orgs, err := s.usersOrganizations(serverCtx, usr)
		if err != nil {
			unknownErrorWithMessage(w, err, s.Logger)
			return
		}
		res := newMeResponse(usr, currentOrg.ID)
		res.Organizations = orgs
		res.CurrentOrganization = currentOrg
		encodeJSON(w, http.StatusOK, res, s.Logger)
		return
	}
	// Because we didn't find a user, making a new one
	user := &chronograf.User{
		Name:     p.Subject,
		Provider: p.Issuer,
		// TODO: This Scheme value is hard-coded temporarily since we only currently
		// support OAuth2. This hard-coding should be removed whenever we add
		// support for other authentication schemes.
		Scheme: scheme,
		// TODO(desa): this needs a better name
		SuperAdmin: s.newUsersAreSuperAdmin(),
	}
	superAdmin := s.mapPrincipalToSuperAdmin(p)
	if superAdmin {
		user.SuperAdmin = superAdmin
	}
	roles, err := s.mapPrincipalToRoles(serverCtx, p)
	if err != nil {
		Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
		return
	}
	// Non-superadmins with no mapped roles are locked out entirely.
	if !superAdmin && len(roles) == 0 {
		Error(w, http.StatusForbidden, "This Chronograf is private. To gain access, you must be explicitly added by an administrator.", s.Logger)
		return
	}
	// If the user is a superadmin, give them a role in the default organization
	if user.SuperAdmin {
		hasDefaultOrgRole := false
		for _, role := range roles {
			if role.Organization == defaultOrg.ID {
				hasDefaultOrgRole = true
				break
			}
		}
		if !hasDefaultOrgRole {
			roles = append(roles, chronograf.Role{
				Name:         defaultOrg.DefaultRole,
				Organization: defaultOrg.ID,
			})
		}
	}
	user.Roles = roles
	newUser, err := s.Store.Users(serverCtx).Add(serverCtx, user)
	if err != nil {
		msg := fmt.Errorf("error storing user %s: %v", user.Name, err)
		unknownErrorWithMessage(w, msg, s.Logger)
		return
	}
	orgs, err := s.usersOrganizations(serverCtx, newUser)
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	currentOrg, err := s.Store.Organizations(serverCtx).Get(serverCtx, chronograf.OrganizationQuery{ID: &p.Organization})
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	res := newMeResponse(newUser, currentOrg.ID)
	res.Organizations = orgs
	res.CurrentOrganization = currentOrg
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// firstUser reports whether no users exist in the store yet. A store error is
// treated as "not the first user" so callers fail closed.
func (s *Service) firstUser() bool {
	ctx := serverContext(context.Background())
	count, err := s.Store.Users(ctx).Num(ctx)
	return err == nil && count == 0
}
// newUsersAreSuperAdmin reports whether users created by the Me handler
// should receive SuperAdmin status.
//
// The very first user is always a SuperAdmin. Enforcing that here is not
// strictly necessary while superAdminNewUsers defaults to true, but nothing
// else in the application dictates that it must be true, so the check is kept
// in case defaults become overridable (e.g. via CLI) in the future. A config
// read error disables the grant.
func (s *Service) newUsersAreSuperAdmin() bool {
	if s.firstUser() {
		return true
	}
	ctx := serverContext(context.Background())
	config, err := s.Store.Config(ctx).Get(ctx)
	if err != nil {
		return false
	}
	return config.Auth.SuperAdminNewUsers
}
// usersOrganizations returns the organizations, sorted by ID, in which the
// given user holds at least one role. Organizations deleted concurrently
// (ErrOrganizationNotFound) are skipped; any other store error aborts the
// lookup. The result is never nil for a non-nil user.
func (s *Service) usersOrganizations(ctx context.Context, u *chronograf.User) ([]chronograf.Organization, error) {
	if u == nil {
		// TODO(desa): better error
		return nil, fmt.Errorf("user was nil")
	}

	// Deduplicate organization IDs across the user's roles.
	orgIDs := make(map[string]bool, len(u.Roles))
	for _, role := range u.Roles {
		orgIDs[role.Organization] = true
	}

	orgs := make([]chronograf.Organization, 0, len(orgIDs))
	for orgID := range orgIDs {
		org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &orgID})
		// There can be race conditions between deleting an organization and
		// the me query; treat a missing organization as already removed.
		if err == chronograf.ErrOrganizationNotFound {
			continue
		}
		// Any other error should cause an error to be returned
		if err != nil {
			return nil, err
		}
		orgs = append(orgs, *org)
	}

	// Map iteration order is random; sort for a deterministic response.
	sort.Slice(orgs, func(i, j int) bool {
		return orgs[i].ID < orgs[j].ID
	})
	return orgs, nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,57 +0,0 @@
package server
import (
"net/http"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
)
// RouteMatchesPrincipal checks that the organization on context matches the organization
// in the route. On mismatch (or a missing principal) it responds 403 and does
// not invoke next. When useAuth is false the check is skipped entirely.
func RouteMatchesPrincipal(
	store DataStore,
	useAuth bool,
	logger chronograf.Logger,
	next http.HandlerFunc,
) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		if !useAuth {
			next(w, r)
			return
		}
		log := logger.
			WithField("component", "org_match").
			WithField("remote_addr", r.RemoteAddr).
			WithField("method", r.Method).
			WithField("url", r.URL)
		// "oid" is the organization path parameter on /organizations/:oid routes.
		orgID := httprouter.GetParamFromContext(ctx, "oid")
		p, err := getValidPrincipal(ctx)
		if err != nil {
			log.Error("Failed to retrieve principal from context")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		// A principal with no organization is compared against the default org.
		if p.Organization == "" {
			defaultOrg, err := store.Organizations(ctx).DefaultOrganization(ctx)
			if err != nil {
				log.Error("Failed to look up default organization")
				Error(w, http.StatusForbidden, "User is not authorized", logger)
				return
			}
			p.Organization = defaultOrg.ID
		}
		if orgID != p.Organization {
			log.Error("Route organization does not match the organization on principal")
			Error(w, http.StatusForbidden, "User is not authorized", logger)
			return
		}
		next(w, r)
	}
}

View File

@ -1,195 +0,0 @@
package server
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
)
// TestRouteMatchesPrincipal is a table-driven test of the org-matching
// middleware: the wrapped handler must run only when the ":oid" route
// parameter matches the principal's organization, or when auth is disabled.
func TestRouteMatchesPrincipal(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		Logger             chronograf.Logger
	}
	type args struct {
		useAuth      bool
		principal    *oauth2.Principal
		routerParams *httprouter.Params
	}
	type wants struct {
		matches bool
	}

	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "route matches request params",
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID: "default",
						}, nil
					},
				},
			},
			args: args{
				useAuth: true,
				principal: &oauth2.Principal{
					Subject:      "user",
					Issuer:       "github",
					Organization: "default",
				},
				routerParams: &httprouter.Params{
					{
						Key:   "oid",
						Value: "default",
					},
				},
			},
			wants: wants{
				matches: true,
			},
		},
		{
			name: "route does not match request params",
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID: "default",
						}, nil
					},
				},
			},
			args: args{
				useAuth: true,
				principal: &oauth2.Principal{
					Subject:      "user",
					Issuer:       "github",
					Organization: "default",
				},
				routerParams: &httprouter.Params{
					{
						Key:   "oid",
						Value: "other",
					},
				},
			},
			wants: wants{
				matches: false,
			},
		},
		{
			name: "missing principal",
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID: "default",
						}, nil
					},
				},
			},
			args: args{
				useAuth:   true,
				principal: nil,
				routerParams: &httprouter.Params{
					{
						Key:   "oid",
						Value: "other",
					},
				},
			},
			wants: wants{
				matches: false,
			},
		},
		{
			name: "not using auth",
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					DefaultOrganizationF: func(ctx context.Context) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID: "default",
						}, nil
					},
				},
			},
			args: args{
				useAuth: false,
				principal: &oauth2.Principal{
					Subject:      "user",
					Issuer:       "github",
					Organization: "default",
				},
				routerParams: &httprouter.Params{
					{
						Key:   "oid",
						Value: "other",
					},
				},
			},
			wants: wants{
				matches: true,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			store := &mocks.Store{
				OrganizationsStore: tt.fields.OrganizationsStore,
			}
			// matches records whether the wrapped handler was invoked.
			var matches bool
			next := func(w http.ResponseWriter, r *http.Request) {
				matches = true
			}
			fn := RouteMatchesPrincipal(
				store,
				tt.args.useAuth,
				tt.fields.Logger,
				next,
			)
			w := httptest.NewRecorder()
			url := "http://any.url"
			r := httptest.NewRequest(
				"GET",
				url,
				nil,
			)
			if tt.args.routerParams != nil {
				r = r.WithContext(httprouter.WithParams(r.Context(), *tt.args.routerParams))
			}
			if tt.args.principal == nil {
				r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, nil))
			} else {
				r = r.WithContext(context.WithValue(r.Context(), oauth2.PrincipalKey, *tt.args.principal))
			}
			fn(w, r)
			if matches != tt.wants.matches {
				t.Errorf("%q. RouteMatchesPrincipal() = %v, expected %v", tt.name, matches, tt.wants.matches)
			}
			// Every rejection path must answer with 403 Forbidden.
			if !matches && w.Code != http.StatusForbidden {
				t.Errorf("%q. RouteMatchesPrincipal() Status Code = %v, expected %v", tt.name, w.Code, http.StatusForbidden)
			}
		})
	}
}

View File

@ -1,59 +0,0 @@
package server
import (
"net/http"
libpath "path"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Compile-time check that MountableRouter implements chronograf.Router.
var _ chronograf.Router = &MountableRouter{}

// MountableRouter is an implementation of a chronograf.Router which supports
// prefixing each route of a Delegated chronograf.Router with a prefix.
type MountableRouter struct {
	Prefix   string            // Prefix is prepended to every registered path.
	Delegate chronograf.Router // Delegate receives the prefixed registrations and serves requests.
}
// DELETE registers a DELETE handler on the delegate router, mounted beneath
// the configured route prefix.
func (mr *MountableRouter) DELETE(path string, handler http.HandlerFunc) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.DELETE(prefixed, handler)
}
// GET registers a GET handler on the delegate router, mounted beneath the
// configured route prefix.
func (mr *MountableRouter) GET(path string, handler http.HandlerFunc) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.GET(prefixed, handler)
}
// POST registers a POST handler on the delegate router, mounted beneath the
// configured route prefix.
func (mr *MountableRouter) POST(path string, handler http.HandlerFunc) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.POST(prefixed, handler)
}
// PUT registers a PUT handler on the delegate router, mounted beneath the
// configured route prefix.
func (mr *MountableRouter) PUT(path string, handler http.HandlerFunc) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.PUT(prefixed, handler)
}
// PATCH registers a PATCH handler on the delegate router, mounted beneath the
// configured route prefix.
func (mr *MountableRouter) PATCH(path string, handler http.HandlerFunc) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.PATCH(prefixed, handler)
}
// Handler registers a handler for the given HTTP method on the delegate
// router, mounted beneath the configured route prefix.
func (mr *MountableRouter) Handler(method string, path string, handler http.Handler) {
	prefixed := libpath.Join(mr.Prefix, path)
	mr.Delegate.Handler(method, prefixed, handler)
}
// ServeHTTP is an implementation of http.Handler which delegates to the
// configured Delegate's implementation of http.Handler. Note that incoming
// request paths are NOT stripped of the prefix here; the delegate's routes
// were registered with the prefix already applied.
func (mr *MountableRouter) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	mr.Delegate.ServeHTTP(rw, r)
}

View File

@ -1,240 +0,0 @@
package server_test
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf/server"
)
// Test_MountableRouter_MountsRoutesUnderPrefix verifies that a GET route
// registered at "/biff" is served at "/chronograf/biff".
func Test_MountableRouter_MountsRoutesUnderPrefix(t *testing.T) {
	t.Parallel()
	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}
	expected := "Hello?! McFly?! Anybody in there?!"
	mr.GET("/biff", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		fmt.Fprint(rw, expected)
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/chronograf/biff")
	if err != nil {
		t.Fatal("Unexpected error fetching from mounted router: err:", err)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal("Unexpected error decoding response body: err:", err)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Expected 200 but received", resp.StatusCode)
	}

	if string(body) != expected {
		t.Fatalf("Unexpected response body: Want: \"%s\". Got: \"%s\"", expected, string(body))
	}
}
// Test_MountableRouter_PrefixesPosts verifies that POST routes are mounted
// under the prefix and that the request body reaches the handler intact.
func Test_MountableRouter_PrefixesPosts(t *testing.T) {
	t.Parallel()
	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}

	expected := "Great Scott!"
	// actual captures exactly len(expected) bytes of the request body.
	actual := make([]byte, len(expected))
	mr.POST("/doc", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		if _, err := io.ReadFull(r.Body, actual); err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
		} else {
			rw.WriteHeader(http.StatusOK)
		}
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	resp, err := http.Post(ts.URL+"/chronograf/doc", "text/plain", strings.NewReader(expected))
	if err != nil {
		t.Fatal("Unexpected error posting to mounted router: err:", err)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Expected 200 but received", resp.StatusCode)
	}

	if string(actual) != expected {
		t.Fatalf("Unexpected request body: Want: \"%s\". Got: \"%s\"", expected, string(actual))
	}
}
// Test_MountableRouter_PrefixesPuts verifies that PUT routes are mounted
// under the prefix and that the request body reaches the handler intact.
func Test_MountableRouter_PrefixesPuts(t *testing.T) {
	t.Parallel()
	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}

	expected := "Great Scott!"
	actual := make([]byte, len(expected))
	mr.PUT("/doc", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		if _, err := io.ReadFull(r.Body, actual); err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
		} else {
			rw.WriteHeader(http.StatusOK)
		}
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	req := httptest.NewRequest(http.MethodPut, ts.URL+"/chronograf/doc", strings.NewReader(expected))
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	req.Header.Set("Content-Length", fmt.Sprintf("%d", len(expected)))
	// httptest.NewRequest builds a server-side request; RequestURI must be
	// cleared before handing it to an http.Client.
	req.RequestURI = ""

	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal("Unexpected error posting to mounted router: err:", err)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Expected 200 but received", resp.StatusCode)
	}

	if string(actual) != expected {
		t.Fatalf("Unexpected request body: Want: \"%s\". Got: \"%s\"", expected, string(actual))
	}
}
// Test_MountableRouter_PrefixesDeletes verifies that DELETE routes are
// mounted under the prefix.
func Test_MountableRouter_PrefixesDeletes(t *testing.T) {
	t.Parallel()
	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}

	mr.DELETE("/proto1985", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusNoContent)
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	req := httptest.NewRequest(http.MethodDelete, ts.URL+"/chronograf/proto1985", nil)
	// Clear RequestURI so the request can be sent through an http.Client.
	req.RequestURI = ""

	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal("Unexpected error sending request to mounted router: err:", err)
	}

	if resp.StatusCode != http.StatusNoContent {
		t.Fatal("Expected 204 but received", resp.StatusCode)
	}
}
// Test_MountableRouter_PrefixesPatches verifies that PATCH routes are mounted
// under the prefix and that a JSON body is decoded and applied by the handler.
func Test_MountableRouter_PrefixesPatches(t *testing.T) {
	t.Parallel()
	type Character struct {
		Name  string
		Items []string
	}

	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}

	biff := Character{"biff", []string{"sports almanac"}}
	mr.PATCH("/1955", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		c := Character{}
		err := json.NewDecoder(r.Body).Decode(&c)
		if err != nil {
			rw.WriteHeader(http.StatusBadRequest)
		} else {
			biff.Items = c.Items
			rw.WriteHeader(http.StatusOK)
		}
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	// Stream the JSON payload through a pipe as the request body.
	r, w := io.Pipe()
	go func() {
		_ = json.NewEncoder(w).Encode(Character{"biff", []string{}})
		w.Close()
	}()

	req := httptest.NewRequest(http.MethodPatch, ts.URL+"/chronograf/1955", r)
	// Clear RequestURI so the request can be sent through an http.Client.
	req.RequestURI = ""

	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal("Unexpected error sending request to mounted router: err:", err)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Expected 200 but received", resp.StatusCode)
	}

	if len(biff.Items) != 0 {
		t.Fatal("Failed to alter history, biff still has the sports almanac")
	}
}
// Test_MountableRouter_PrefixesHandler verifies that Handler-registered
// routes (arbitrary method) are mounted under the prefix.
func Test_MountableRouter_PrefixesHandler(t *testing.T) {
	t.Parallel()
	mr := &server.MountableRouter{
		Prefix:   "/chronograf",
		Delegate: httprouter.New(),
	}

	mr.Handler(http.MethodGet, "/recklessAmountOfPower", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		rw.Write([]byte("1.21 Gigawatts!"))
	}))

	ts := httptest.NewServer(mr)
	defer ts.Close()

	req := httptest.NewRequest(http.MethodGet, ts.URL+"/chronograf/recklessAmountOfPower", nil)
	// Clear RequestURI so the request can be sent through an http.Client.
	req.RequestURI = ""

	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal("Unexpected error sending request to mounted router: err:", err)
	}

	if resp.StatusCode != http.StatusOK {
		t.Fatal("Expected 200 but received", resp.StatusCode)
	}
}

View File

@ -1,399 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"path"
"strconv"
"strings"
"github.com/NYTimes/gziphandler"
"github.com/bouk/httprouter"
jhttprouter "github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
const (
	// JSONType the mimetype for a json request, used as the Content-Type of
	// error responses written by Error.
	JSONType = "application/json"
)
// MuxOpts are the options for the router. Mostly related to auth.
type MuxOpts struct {
	Logger        chronograf.Logger
	Develop       bool                 // Develop loads assets from filesystem instead of bindata
	Basepath      string               // URL path prefix under which all chronograf routes will be mounted
	UseAuth       bool                 // UseAuth turns on Github OAuth and JWT
	Auth          oauth2.Authenticator // Auth is used to authenticate and authorize
	ProviderFuncs []func(func(oauth2.Provider, oauth2.Mux))
	StatusFeedURL string            // JSON Feed URL for the client Status page News Feed
	CustomLinks   map[string]string // Any custom external links for client's User menu
}
// NewMux attaches all the route handlers; handler returned servers chronograf.
//
// Routes are guarded by role middleware (EnsureViewer/Editor/Admin/SuperAdmin)
// built from opts.UseAuth; unknown routes fall through to the gzipped React
// asset handler. When opts.Basepath is set, every route is mounted beneath it
// via MountableRouter. When opts.UseAuth is set, the whole API is wrapped in
// OAuth2 token validation by AuthAPI.
func NewMux(opts MuxOpts, service Service) http.Handler {
	hr := httprouter.New()

	/* React Application */
	assets := Assets(AssetsOpts{
		Develop: opts.Develop,
		Logger:  opts.Logger,
	})

	// Prefix any URLs found in the React assets with any configured basepath
	prefixedAssets := NewDefaultURLPrefixer(opts.Basepath, assets, opts.Logger)

	// Compress the assets with gzip if an accepted encoding
	compressed := gziphandler.GzipHandler(prefixedAssets)

	// The react application handles all the routing if the server does not
	// know about the route. This means that we never have unknown routes on
	// the server.
	hr.NotFound = compressed

	var router chronograf.Router = hr

	// Set route prefix for all routes if basepath is present
	if opts.Basepath != "" {
		router = &MountableRouter{
			Prefix:   opts.Basepath,
			Delegate: hr,
		}
		//The assets handler is always unaware of basepaths, so the
		// basepath needs to always be removed before sending requests to it
		hr.NotFound = http.StripPrefix(opts.Basepath, hr.NotFound)
	}

	EnsureMember := func(next http.HandlerFunc) http.HandlerFunc {
		return AuthorizedUser(
			service.Store,
			opts.UseAuth,
			roles.MemberRoleName,
			opts.Logger,
			next,
		)
	}
	// EnsureMember is currently unused; kept for symmetry with the other guards.
	_ = EnsureMember
	EnsureViewer := func(next http.HandlerFunc) http.HandlerFunc {
		return AuthorizedUser(
			service.Store,
			opts.UseAuth,
			roles.ViewerRoleName,
			opts.Logger,
			next,
		)
	}
	EnsureEditor := func(next http.HandlerFunc) http.HandlerFunc {
		return AuthorizedUser(
			service.Store,
			opts.UseAuth,
			roles.EditorRoleName,
			opts.Logger,
			next,
		)
	}
	EnsureAdmin := func(next http.HandlerFunc) http.HandlerFunc {
		return AuthorizedUser(
			service.Store,
			opts.UseAuth,
			roles.AdminRoleName,
			opts.Logger,
			next,
		)
	}
	EnsureSuperAdmin := func(next http.HandlerFunc) http.HandlerFunc {
		return AuthorizedUser(
			service.Store,
			opts.UseAuth,
			roles.SuperAdminStatus,
			opts.Logger,
			next,
		)
	}

	rawStoreAccess := func(next http.HandlerFunc) http.HandlerFunc {
		return RawStoreAccess(opts.Logger, next)
	}

	ensureOrgMatches := func(next http.HandlerFunc) http.HandlerFunc {
		return RouteMatchesPrincipal(
			service.Store,
			opts.UseAuth,
			opts.Logger,
			next,
		)
	}

	/* Documentation */
	router.GET("/swagger.json", Spec())
	router.GET("/docs", Redoc("/swagger.json"))

	/* API */
	// Organizations
	router.GET("/chronograf/v1/organizations", EnsureAdmin(service.Organizations))
	router.POST("/chronograf/v1/organizations", EnsureSuperAdmin(service.NewOrganization))

	router.GET("/chronograf/v1/organizations/:oid", EnsureAdmin(service.OrganizationID))
	router.PATCH("/chronograf/v1/organizations/:oid", EnsureSuperAdmin(service.UpdateOrganization))
	router.DELETE("/chronograf/v1/organizations/:oid", EnsureSuperAdmin(service.RemoveOrganization))

	// Mappings
	router.GET("/chronograf/v1/mappings", EnsureSuperAdmin(service.Mappings))
	router.POST("/chronograf/v1/mappings", EnsureSuperAdmin(service.NewMapping))

	router.PUT("/chronograf/v1/mappings/:id", EnsureSuperAdmin(service.UpdateMapping))
	router.DELETE("/chronograf/v1/mappings/:id", EnsureSuperAdmin(service.RemoveMapping))

	// Source Proxy to Influx; Has gzip compression around the handler
	influx := gziphandler.GzipHandler(http.HandlerFunc(EnsureViewer(service.Influx)))
	router.Handler("POST", "/chronograf/v1/sources/:id/proxy", influx)

	// Write proxies line protocol write requests to InfluxDB
	router.POST("/chronograf/v1/sources/:id/write", EnsureViewer(service.Write))

	// Queries is used to analyze a specific queries and does not create any
	// resources. It's a POST because Queries are POSTed to InfluxDB, but this
	// only modifies InfluxDB resources with certain metaqueries, e.g. DROP DATABASE.
	//
	// Admins should ensure that the InfluxDB source as the proper permissions
	// intended for Chronograf Users with the Viewer Role type.
	router.POST("/chronograf/v1/sources/:id/queries", EnsureViewer(service.Queries))

	// Annotations are user-defined events associated with this source
	router.GET("/chronograf/v1/sources/:id/annotations", EnsureViewer(service.Annotations))
	router.POST("/chronograf/v1/sources/:id/annotations", EnsureEditor(service.NewAnnotation))
	router.GET("/chronograf/v1/sources/:id/annotations/:aid", EnsureViewer(service.Annotation))
	router.DELETE("/chronograf/v1/sources/:id/annotations/:aid", EnsureEditor(service.RemoveAnnotation))
	router.PATCH("/chronograf/v1/sources/:id/annotations/:aid", EnsureEditor(service.UpdateAnnotation))

	// All possible permissions for users in this source
	router.GET("/chronograf/v1/sources/:id/permissions", EnsureViewer(service.Permissions))

	// Services are resources that chronograf proxies to
	router.GET("/chronograf/v1/sources/:id/services", EnsureViewer(service.Services))
	router.POST("/chronograf/v1/sources/:id/services", EnsureEditor(service.NewService))
	router.GET("/chronograf/v1/sources/:id/services/:kid", EnsureViewer(service.ServiceID))
	router.PATCH("/chronograf/v1/sources/:id/services/:kid", EnsureEditor(service.UpdateService))
	router.DELETE("/chronograf/v1/sources/:id/services/:kid", EnsureEditor(service.RemoveService))

	// Service Proxy
	router.GET("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureViewer(service.ProxyGet))
	router.POST("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyPost))
	router.PATCH("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyPatch))
	router.DELETE("/chronograf/v1/sources/:id/services/:kid/proxy", EnsureEditor(service.ProxyDelete))

	// Layouts
	router.GET("/chronograf/v1/layouts", EnsureViewer(service.Layouts))
	router.GET("/chronograf/v1/layouts/:id", EnsureViewer(service.LayoutsID))

	// Users associated with Chronograf
	router.GET("/chronograf/v1/me", service.Me)

	// Set current chronograf organization the user is logged into
	router.PUT("/chronograf/v1/me", service.UpdateMe(opts.Auth))

	// TODO(desa): what to do about admin's being able to set superadmin
	router.GET("/chronograf/v1/organizations/:oid/users", EnsureAdmin(ensureOrgMatches(service.Users)))
	router.POST("/chronograf/v1/organizations/:oid/users", EnsureAdmin(ensureOrgMatches(service.NewUser)))

	router.GET("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.UserID)))
	router.DELETE("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.RemoveUser)))
	router.PATCH("/chronograf/v1/organizations/:oid/users/:id", EnsureAdmin(ensureOrgMatches(service.UpdateUser)))

	router.GET("/chronograf/v1/users", EnsureSuperAdmin(rawStoreAccess(service.Users)))
	router.POST("/chronograf/v1/users", EnsureSuperAdmin(rawStoreAccess(service.NewUser)))

	router.GET("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.UserID)))
	router.DELETE("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.RemoveUser)))
	router.PATCH("/chronograf/v1/users/:id", EnsureSuperAdmin(rawStoreAccess(service.UpdateUser)))

	// Dashboards
	router.GET("/chronograf/v1/dashboards", EnsureViewer(service.Dashboards))
	router.POST("/chronograf/v1/dashboards", EnsureEditor(service.NewDashboard))

	router.GET("/chronograf/v1/dashboards/:id", EnsureViewer(service.DashboardID))
	router.DELETE("/chronograf/v1/dashboards/:id", EnsureEditor(service.RemoveDashboard))
	router.PUT("/chronograf/v1/dashboards/:id", EnsureEditor(service.ReplaceDashboard))
	router.PATCH("/chronograf/v1/dashboards/:id", EnsureEditor(service.UpdateDashboard))

	// Dashboard Cells
	router.GET("/chronograf/v1/dashboards/:id/cells", EnsureViewer(service.DashboardCells))
	router.POST("/chronograf/v1/dashboards/:id/cells", EnsureEditor(service.NewDashboardCell))

	router.GET("/chronograf/v1/dashboards/:id/cells/:cid", EnsureViewer(service.DashboardCellID))
	router.DELETE("/chronograf/v1/dashboards/:id/cells/:cid", EnsureEditor(service.RemoveDashboardCell))
	router.PUT("/chronograf/v1/dashboards/:id/cells/:cid", EnsureEditor(service.ReplaceDashboardCell))

	// Dashboard Templates
	router.GET("/chronograf/v1/dashboards/:id/templates", EnsureViewer(service.Templates))
	router.POST("/chronograf/v1/dashboards/:id/templates", EnsureEditor(service.NewTemplate))

	router.GET("/chronograf/v1/dashboards/:id/templates/:tid", EnsureViewer(service.TemplateID))
	router.DELETE("/chronograf/v1/dashboards/:id/templates/:tid", EnsureEditor(service.RemoveTemplate))
	router.PUT("/chronograf/v1/dashboards/:id/templates/:tid", EnsureEditor(service.ReplaceTemplate))

	// Databases
	router.GET("/chronograf/v1/sources/:id/dbs", EnsureViewer(service.GetDatabases))
	router.POST("/chronograf/v1/sources/:id/dbs", EnsureEditor(service.NewDatabase))
	router.DELETE("/chronograf/v1/sources/:id/dbs/:db", EnsureEditor(service.DropDatabase))

	// Retention Policies
	router.GET("/chronograf/v1/sources/:id/dbs/:db/rps", EnsureViewer(service.RetentionPolicies))
	router.POST("/chronograf/v1/sources/:id/dbs/:db/rps", EnsureEditor(service.NewRetentionPolicy))

	router.PUT("/chronograf/v1/sources/:id/dbs/:db/rps/:rp", EnsureEditor(service.UpdateRetentionPolicy))
	router.DELETE("/chronograf/v1/sources/:id/dbs/:db/rps/:rp", EnsureEditor(service.DropRetentionPolicy))

	// Measurements
	router.GET("/chronograf/v1/sources/:id/dbs/:db/measurements", EnsureViewer(service.Measurements))

	// Global application config for Chronograf
	router.GET("/chronograf/v1/config", EnsureSuperAdmin(service.Config))
	router.GET("/chronograf/v1/config/auth", EnsureSuperAdmin(service.AuthConfig))
	router.PUT("/chronograf/v1/config/auth", EnsureSuperAdmin(service.ReplaceAuthConfig))

	// Organization config settings for Chronograf
	router.GET("/chronograf/v1/org_config", EnsureViewer(service.OrganizationConfig))
	router.GET("/chronograf/v1/org_config/logviewer", EnsureViewer(service.OrganizationLogViewerConfig))
	router.PUT("/chronograf/v1/org_config/logviewer", EnsureEditor(service.ReplaceOrganizationLogViewerConfig))

	router.GET("/chronograf/v1/env", EnsureViewer(service.Environment))

	allRoutes := &AllRoutes{
		Logger:      opts.Logger,
		StatusFeed:  opts.StatusFeedURL,
		CustomLinks: opts.CustomLinks,
	}

	getPrincipal := func(r *http.Request) oauth2.Principal {
		p, _ := HasAuthorizedToken(opts.Auth, r)
		return p
	}
	allRoutes.GetPrincipal = getPrincipal
	router.Handler("GET", "/chronograf/v1/", allRoutes)

	var out http.Handler

	/* Authentication */
	if opts.UseAuth {
		// Encapsulate the router with OAuth2
		var auth http.Handler
		auth, allRoutes.AuthRoutes = AuthAPI(opts, router)
		allRoutes.LogoutLink = path.Join(opts.Basepath, "/oauth/logout")

		// Create middleware that redirects to the appropriate provider logout
		router.GET("/oauth/logout", Logout("/", opts.Basepath, allRoutes.AuthRoutes))
		out = Logger(opts.Logger, FlushingHandler(auth))
	} else {
		out = Logger(opts.Logger, FlushingHandler(router))
	}

	return out
}
// AuthAPI adds the OAuth routes if auth is enabled.
//
// For each configured provider it registers /oauth/<name>/{login,logout,callback}
// on the router and collects the basepath-prefixed links into AuthRoutes for
// the client. The returned handler wraps the router so that requests under
// /chronograf/v1/* (and the logout path) pass through token-validation
// middleware first.
func AuthAPI(opts MuxOpts, router chronograf.Router) (http.Handler, AuthRoutes) {
	routes := AuthRoutes{}
	for _, pf := range opts.ProviderFuncs {
		pf(func(p oauth2.Provider, m oauth2.Mux) {
			urlName := PathEscape(strings.ToLower(p.Name()))

			loginPath := path.Join("/oauth", urlName, "login")
			logoutPath := path.Join("/oauth", urlName, "logout")
			callbackPath := path.Join("/oauth", urlName, "callback")

			router.Handler("GET", loginPath, m.Login())
			router.Handler("GET", logoutPath, m.Logout())
			router.Handler("GET", callbackPath, m.Callback())
			// NOTE(review): strings.Title is deprecated in modern Go; kept here
			// for byte-identical labels.
			routes = append(routes, AuthRoute{
				Name:  p.Name(),
				Label: strings.Title(p.Name()),
				// AuthRoutes are content served to the page. When Basepath is set, it
				// says that all content served to the page will be prefixed with the
				// basepath. Since these routes are consumed by JS, it will need the
				// basepath set to traverse a proxy correctly
				Login:    path.Join(opts.Basepath, loginPath),
				Logout:   path.Join(opts.Basepath, logoutPath),
				Callback: path.Join(opts.Basepath, callbackPath),
			})
		})
	}

	rootPath := path.Join(opts.Basepath, "/chronograf/v1")
	logoutPath := path.Join(opts.Basepath, "/oauth/logout")

	tokenMiddleware := AuthorizedToken(opts.Auth, opts.Logger, router)
	// Wrap the API with token validation middleware.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cleanPath := path.Clean(r.URL.Path) // compare ignoring path garbage, trailing slashes, etc.
		// len(cleanPath) > len(rootPath) excludes the API root itself, which
		// serves the (unauthenticated) route index.
		if (strings.HasPrefix(cleanPath, rootPath) && len(cleanPath) > len(rootPath)) || cleanPath == logoutPath {
			tokenMiddleware.ServeHTTP(w, r)
			return
		}
		router.ServeHTTP(w, r)
	}), routes
}
// encodeJSON writes v to w as a JSON body with the given HTTP status code.
func encodeJSON(w http.ResponseWriter, status int, v interface{}, logger chronograf.Logger) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	if encErr := json.NewEncoder(w).Encode(v); encErr != nil {
		// Status and headers are already on the wire; report best-effort.
		unknownErrorWithMessage(w, encErr, logger)
	}
}
// Error writes a JSON error message with the given HTTP status code and logs
// the failure through the provided logger.
func Error(w http.ResponseWriter, code int, msg string, logger chronograf.Logger) {
	e := ErrorMessage{
		Code:    code,
		Message: msg,
	}
	b, err := json.Marshal(e)
	if err != nil {
		// Marshaling a plain struct should never fail; fall back to a canned body.
		code = http.StatusInternalServerError
		b = []byte(`{"code": 500, "message":"server_error"}`)
	}

	logger.
		WithField("component", "server").
		// Fixed: the field key previously contained a stray trailing space
		// ("http_status "), producing a malformed structured-log key.
		WithField("http_status", code).
		Error("Error message ", msg)

	w.Header().Set("Content-Type", JSONType)
	w.WriteHeader(code)
	_, _ = w.Write(b)
}
// invalidData responds with 422 Unprocessable Entity carrying err's message.
func invalidData(w http.ResponseWriter, err error, logger chronograf.Logger) {
	msg := fmt.Sprintf("%v", err)
	Error(w, http.StatusUnprocessableEntity, msg, logger)
}
// invalidJSON responds with 400 Bad Request for an unparsable request body.
func invalidJSON(w http.ResponseWriter, logger chronograf.Logger) {
	Error(w, http.StatusBadRequest, "unparsable JSON", logger)
}
// unknownErrorWithMessage responds with 500 Internal Server Error, embedding
// err's message in the JSON body.
func unknownErrorWithMessage(w http.ResponseWriter, err error, logger chronograf.Logger) {
	msg := fmt.Sprintf("unknown error: %v", err)
	Error(w, http.StatusInternalServerError, msg, logger)
}
// notFound responds with 404 Not Found, naming the missing resource's ID.
func notFound(w http.ResponseWriter, id interface{}, logger chronograf.Logger) {
	msg := fmt.Sprintf("ID %v not found", id)
	Error(w, http.StatusNotFound, msg, logger)
}
// paramID extracts the named route parameter from the request context and
// converts it to an int. It returns -1 and an error when the parameter is not
// a valid integer.
func paramID(key string, r *http.Request) (int, error) {
	ctx := r.Context()
	param := jhttprouter.ParamsFromContext(ctx).ByName(key)
	id, err := strconv.Atoi(param)
	if err != nil {
		// Wrap the strconv error instead of discarding it, so callers (and
		// logs) can see why the conversion failed.
		return -1, fmt.Errorf("error converting ID %s: %w", param, err)
	}
	return id, nil
}
// paramStr returns the named route parameter from the request context.
// The error result is always nil; it exists to mirror paramID's signature.
func paramStr(key string, r *http.Request) (string, error) {
	value := jhttprouter.ParamsFromContext(r.Context()).ByName(key)
	return value, nil
}

View File

@ -1,180 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
)
// organizationConfigLinks are the HATEOAS links returned alongside an
// organization config resource.
type organizationConfigLinks struct {
	Self      string `json:"self"`      // Self link mapping to this resource
	LogViewer string `json:"logViewer"` // LogViewer link to the organization log viewer config endpoint
}
// organizationConfigResponse is the JSON body for an organization config,
// embedding the config itself alongside its links.
type organizationConfigResponse struct {
	Links organizationConfigLinks `json:"links"`
	chronograf.OrganizationConfig
}
// newOrganizationConfigResponse wraps an OrganizationConfig with its REST links.
func newOrganizationConfigResponse(c chronograf.OrganizationConfig) *organizationConfigResponse {
	links := organizationConfigLinks{
		Self:      "/chronograf/v1/org_config",
		LogViewer: "/chronograf/v1/org_config/logviewer",
	}
	return &organizationConfigResponse{
		Links:              links,
		OrganizationConfig: c,
	}
}
// logViewerConfigResponse is the JSON body for the log viewer section of an
// organization's config, embedding the config alongside its self link.
type logViewerConfigResponse struct {
	Links selfLinks `json:"links"`
	chronograf.LogViewerConfig
}
// newLogViewerConfigResponse wraps a LogViewerConfig with its self link.
func newLogViewerConfigResponse(c chronograf.LogViewerConfig) *logViewerConfigResponse {
	links := selfLinks{
		Self: "/chronograf/v1/org_config/logviewer",
	}
	return &logViewerConfigResponse{
		Links:           links,
		LogViewerConfig: c,
	}
}
// OrganizationConfig retrieves the organization-wide config settings for the
// organization carried in the request context.
func (s *Service) OrganizationConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger)
		return
	}

	// FindOrCreate guarantees a config exists even for brand-new organizations.
	cfg, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newOrganizationConfigResponse(*cfg), s.Logger)
}
// OrganizationLogViewerConfig retrieves the log viewer UI section of the
// organization config. FindOrCreate ensures new organizations get default
// config values without tying organization creation to config creation.
func (s *Service) OrganizationLogViewerConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger)
		return
	}

	cfg, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newLogViewerConfigResponse(cfg.LogViewer), s.Logger)
}
// ReplaceOrganizationLogViewerConfig replaces the log viewer UI section of
// the organization config after validating the request body.
func (s *Service) ReplaceOrganizationLogViewerConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		Error(w, http.StatusBadRequest, "Organization not found on context", s.Logger)
		return
	}

	// Decode and validate the replacement log viewer config before touching
	// the store.
	var lv chronograf.LogViewerConfig
	if err := json.NewDecoder(r.Body).Decode(&lv); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := validLogViewerConfig(lv); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	cfg, err := s.Store.OrganizationConfig(ctx).FindOrCreate(ctx, orgID)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	cfg.LogViewer = lv
	if err := s.Store.OrganizationConfig(ctx).Put(ctx, cfg); err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newLogViewerConfigResponse(cfg.LogViewer), s.Logger)
}
// validLogViewerConfig ensures that a request-body log viewer UI config is
// valid. To be valid it must: not be empty, have at least one column, not
// have multiple columns sharing a name or position value, have exactly one
// "visibility" encoding per column whose value is "visible" or "hidden", and
// a severity column must carry at least one severity format of value icon or
// text (and at most one of each).
func validLogViewerConfig(c chronograf.LogViewerConfig) error {
	if len(c.Columns) == 0 {
		return fmt.Errorf("invalid log viewer config: must have at least 1 column")
	}

	seenNames := map[string]bool{}
	seenPositions := map[int32]bool{}

	for _, col := range c.Columns {
		// Column names and positions must be unique across the whole config.
		if seenNames[col.Name] {
			return fmt.Errorf("invalid log viewer config: Duplicate column name %s", col.Name)
		}
		seenNames[col.Name] = true

		if seenPositions[col.Position] {
			return fmt.Errorf("invalid log viewer config: Multiple columns with same position value")
		}
		seenPositions[col.Position] = true

		var visibilityCount, iconCount, textCount int
		for _, enc := range col.Encodings {
			if enc.Type == "visibility" {
				visibilityCount++
				if enc.Value != "visible" && enc.Value != "hidden" {
					return fmt.Errorf("invalid log viewer config: invalid visibility in column %s", col.Name)
				}
			}
			// Severity format encodings are counted by value, matching the
			// original behavior (the encoding type is not inspected here).
			if col.Name == "severity" {
				switch enc.Value {
				case "icon":
					iconCount++
				case "text":
					textCount++
				}
			}
		}

		if visibilityCount != 1 {
			return fmt.Errorf("invalid log viewer config: missing visibility encoding in column %s", col.Name)
		}
		if col.Name == "severity" && (iconCount+textCount == 0 || iconCount > 1 || textCount > 1) {
			return fmt.Errorf("invalid log viewer config: invalid number of severity format encodings in column %s", col.Name)
		}
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,232 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// organizationRequest is the JSON body accepted when creating or updating an
// organization.
type organizationRequest struct {
	Name        string `json:"name"`        // Name is the organization's display name.
	DefaultRole string `json:"defaultRole"` // DefaultRole is the role granted to new members; checked by ValidDefaultRole.
}
// ValidCreate checks that a create request carries a name and, via
// ValidDefaultRole, a recognized (or defaulted) role.
func (r *organizationRequest) ValidCreate() error {
	if r.Name != "" {
		return r.ValidDefaultRole()
	}
	return fmt.Errorf("name required on Chronograf Organization request body")
}
// ValidUpdate checks that an update request changes at least one field and,
// when a default role is supplied, that the role is recognized.
func (r *organizationRequest) ValidUpdate() error {
	switch {
	case r.Name == "" && r.DefaultRole == "":
		return fmt.Errorf("no fields to update")
	case r.DefaultRole != "":
		return r.ValidDefaultRole()
	default:
		return nil
	}
}
// ValidDefaultRole normalizes an empty default role to the member role, then
// verifies the role is one of the known role names. Note that it mutates
// r.DefaultRole when the field was empty.
func (r *organizationRequest) ValidDefaultRole() error {
	if r.DefaultRole == "" {
		r.DefaultRole = roles.MemberRoleName
	}

	switch r.DefaultRole {
	case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName:
		return nil
	}
	return fmt.Errorf("default role must be member, viewer, editor, or admin")
}
// organizationResponse is the JSON representation of a single organization
// along with its self link.
type organizationResponse struct {
	Links selfLinks `json:"links"`
	chronograf.Organization
}
// newOrganizationResponse builds the JSON representation of an organization,
// tolerating a nil input by substituting an empty organization.
func newOrganizationResponse(o *chronograf.Organization) *organizationResponse {
	org := chronograf.Organization{}
	if o != nil {
		org = *o
	}
	return &organizationResponse{
		Organization: org,
		Links: selfLinks{
			Self: fmt.Sprintf("/chronograf/v1/organizations/%s", org.ID),
		},
	}
}
// organizationsResponse is the JSON representation of a collection of
// organizations along with the collection's self link.
type organizationsResponse struct {
	Links         selfLinks               `json:"links"`
	Organizations []*organizationResponse `json:"organizations"`
}
// newOrganizationsResponse wraps a list of organizations in the collection's
// JSON representation.
func newOrganizationsResponse(orgs []chronograf.Organization) *organizationsResponse {
	wrapped := make([]*organizationResponse, len(orgs))
	for i := range orgs {
		wrapped[i] = newOrganizationResponse(&orgs[i])
	}
	return &organizationsResponse{
		Links: selfLinks{
			Self: "/chronograf/v1/organizations",
		},
		Organizations: wrapped,
	}
}
// Organizations retrieves all organizations from the store.
func (s *Service) Organizations(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	orgs, err := s.Store.Organizations(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newOrganizationsResponse(orgs), s.Logger)
}
// NewOrganization adds a new organization to store
//
// After the organization is created, the requesting user is added to it as an
// admin. If that second step fails, the freshly created organization is
// deleted (best effort) so the two writes stay consistent.
func (s *Service) NewOrganization(w http.ResponseWriter, r *http.Request) {
	var req organizationRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	ctx := r.Context()
	org := &chronograf.Organization{
		Name:        req.Name,
		DefaultRole: req.DefaultRole,
	}
	res, err := s.Store.Organizations(ctx).Add(ctx, org)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}
	// Now that the organization was created, add the user
	// making the request to the organization
	user, ok := hasUserContext(ctx)
	if !ok {
		// Best attempt at cleanup the organization if there were any errors
		_ = s.Store.Organizations(ctx).Delete(ctx, res)
		Error(w, http.StatusInternalServerError, "failed to retrieve user from context", s.Logger)
		return
	}
	// The creator becomes the new organization's admin.
	user.Roles = []chronograf.Role{
		{
			Organization: res.ID,
			Name:         roles.AdminRoleName,
		},
	}
	// Scope the user write to the new organization via the context value.
	orgCtx := context.WithValue(ctx, organizations.ContextKey, res.ID)
	_, err = s.Store.Users(orgCtx).Add(orgCtx, user)
	if err != nil {
		// Best attempt at cleanup the organization if there were any errors adding user to org
		_ = s.Store.Organizations(ctx).Delete(ctx, res)
		s.Logger.Error("failed to add user to organization", err.Error())
		Error(w, http.StatusInternalServerError, "failed to add user to organization", s.Logger)
		return
	}
	co := newOrganizationResponse(res)
	location(w, co.Links.Self)
	encodeJSON(w, http.StatusCreated, co, s.Logger)
}
// OrganizationID retrieves the organization identified by the :oid route
// parameter from the store.
func (s *Service) OrganizationID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id := httprouter.GetParamFromContext(ctx, "oid")

	org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	encodeJSON(w, http.StatusOK, newOrganizationResponse(org), s.Logger)
}
// UpdateOrganization updates an organization in the organizations store.
// Only the fields present in the request body are changed.
func (s *Service) UpdateOrganization(w http.ResponseWriter, r *http.Request) {
	var req organizationRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	ctx := r.Context()
	id := httprouter.GetParamFromContext(ctx, "oid")

	org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	// Apply only the supplied fields.
	if req.Name != "" {
		org.Name = req.Name
	}
	if req.DefaultRole != "" {
		org.DefaultRole = req.DefaultRole
	}

	if err := s.Store.Organizations(ctx).Update(ctx, org); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	res := newOrganizationResponse(org)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// RemoveOrganization removes an organization from the organizations store,
// responding 204 No Content on success.
func (s *Service) RemoveOrganization(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id := httprouter.GetParamFromContext(ctx, "oid")

	org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusNotFound, err.Error(), s.Logger)
		return
	}

	if err := s.Store.Organizations(ctx).Delete(ctx, org); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

View File

@ -1,726 +0,0 @@
package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// TestService_OrganizationID exercises the single-organization GET handler
// against a mocked OrganizationsStore, checking status code, content type,
// and JSON body (compared order-insensitively via jsonEqual).
func TestService_OrganizationID(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		Logger             chronograf.Logger
	}
	type args struct {
		w *httptest.ResponseRecorder
		r *http.Request
	}
	tests := []struct {
		name            string
		fields          fields
		args            args
		id              string
		wantStatus      int
		wantContentType string
		wantBody        string
	}{
		{
			name: "Get Single Organization",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						switch *q.ID {
						case "1337":
							return &chronograf.Organization{
								ID:   "1337",
								Name: "The Good Place",
							}, nil
						default:
							return nil, fmt.Errorf("organization with ID %s not found", *q.ID)
						}
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody:        `{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place"}`,
		},
		// Same fixture with the expected JSON keys in a different order; both
		// must pass because jsonEqual compares structurally.
		{
			name: "Get Single Organization",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						switch *q.ID {
						case "1337":
							return &chronograf.Organization{
								ID:   "1337",
								Name: "The Good Place",
							}, nil
						default:
							return nil, fmt.Errorf("organization with ID %s not found", *q.ID)
						}
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody:        `{"id":"1337","name":"The Good Place","links":{"self":"/chronograf/v1/organizations/1337"}}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: tt.fields.Logger,
			}
			// Inject the :oid route parameter that the router would supply.
			tt.args.r = tt.args.r.WithContext(httprouter.WithParams(
				context.Background(),
				httprouter.Params{
					{
						Key:   "oid",
						Value: tt.id,
					},
				}))
			s.OrganizationID(tt.args.w, tt.args.r)

			resp := tt.args.w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)

			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. OrganizationID() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
			if tt.wantContentType != "" && content != tt.wantContentType {
				t.Errorf("%q. OrganizationID() = %v, want %v", tt.name, content, tt.wantContentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq {
				t.Errorf("%q. OrganizationID() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody)
			}
		})
	}
}
// TestService_Organizations exercises the list-organizations handler against
// a mocked OrganizationsStore, checking status, content type, and body.
func TestService_Organizations(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		Logger             chronograf.Logger
	}
	type args struct {
		w *httptest.ResponseRecorder
		r *http.Request
	}
	tests := []struct {
		name            string
		fields          fields
		args            args
		wantStatus      int
		wantContentType string
		wantBody        string
	}{
		{
			name: "Get Organizations",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					AllF: func(ctx context.Context) ([]chronograf.Organization, error) {
						return []chronograf.Organization{
							{
								ID:   "1337",
								Name: "The Good Place",
							},
							{
								ID:   "100",
								Name: "The Bad Place",
							},
						}, nil
					},
				},
			},
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody:        `{"links":{"self":"/chronograf/v1/organizations"},"organizations":[{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place"},{"links":{"self":"/chronograf/v1/organizations/100"},"id":"100","name":"The Bad Place"}]}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: tt.fields.Logger,
			}
			s.Organizations(tt.args.w, tt.args.r)

			resp := tt.args.w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)

			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. Organizations() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
			if tt.wantContentType != "" && content != tt.wantContentType {
				t.Errorf("%q. Organizations() = %v, want %v", tt.name, content, tt.wantContentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq {
				t.Errorf("%q. Organizations() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody)
			}
		})
	}
}
// TestService_UpdateOrganization exercises the update handler: renaming,
// changing the default role, empty updates (422), and invalid roles (422).
func TestService_UpdateOrganization(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		Logger             chronograf.Logger
	}
	type args struct {
		w   *httptest.ResponseRecorder
		r   *http.Request
		org *organizationRequest
	}
	tests := []struct {
		name            string
		fields          fields
		args            args
		id              string
		wantStatus      int
		wantContentType string
		wantBody        string
	}{
		{
			name: "Update Organization name",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{
					Name: "The Bad Place",
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					UpdateF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "1337",
							Name:        "The Good Place",
							DefaultRole: roles.ViewerRoleName,
						}, nil
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody:        `{"id":"1337","name":"The Bad Place","defaultRole":"viewer","links":{"self":"/chronograf/v1/organizations/1337"}}`,
		},
		{
			name: "Update Organization - nothing to update",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					UpdateF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "1337",
							Name:        "The Good Place",
							DefaultRole: roles.ViewerRoleName,
						}, nil
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusUnprocessableEntity,
			wantContentType: "application/json",
			wantBody:        `{"code":422,"message":"no fields to update"}`,
		},
		{
			name: "Update Organization default role",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{
					DefaultRole: roles.ViewerRoleName,
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					UpdateF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "1337",
							Name:        "The Good Place",
							DefaultRole: roles.MemberRoleName,
						}, nil
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody:        `{"links":{"self":"/chronograf/v1/organizations/1337"},"id":"1337","name":"The Good Place","defaultRole":"viewer"}`,
		},
		{
			name: "Update Organization - invalid update",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					UpdateF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return nil, nil
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusUnprocessableEntity,
			wantContentType: "application/json",
			wantBody:        `{"code":422,"message":"no fields to update"}`,
		},
		{
			name: "Update Organization - invalid role",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{
					DefaultRole: "sillyrole",
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					UpdateF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return nil, nil
					},
				},
			},
			id:              "1337",
			wantStatus:      http.StatusUnprocessableEntity,
			wantContentType: "application/json",
			wantBody:        `{"code":422,"message":"default role must be member, viewer, editor, or admin"}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: tt.fields.Logger,
			}
			// Inject the :oid route parameter and the JSON request body.
			tt.args.r = tt.args.r.WithContext(httprouter.WithParams(context.Background(),
				httprouter.Params{
					{
						Key:   "oid",
						Value: tt.id,
					},
				}))

			buf, _ := json.Marshal(tt.args.org)
			tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf))

			s.UpdateOrganization(tt.args.w, tt.args.r)

			resp := tt.args.w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)

			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
			if tt.wantContentType != "" && content != tt.wantContentType {
				t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, content, tt.wantContentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq {
				t.Errorf("%q. NewOrganization() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody)
			}
		})
	}
}
// TestService_RemoveOrganization exercises the delete handler against a
// mocked OrganizationsStore and expects 204 No Content.
// (The single case's name, "Update Organization name", is a holdover
// copy-paste; it is a runtime string and is preserved as-is.)
func TestService_RemoveOrganization(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		Logger             chronograf.Logger
	}
	type args struct {
		w *httptest.ResponseRecorder
		r *http.Request
	}
	tests := []struct {
		name       string
		fields     fields
		args       args
		id         string
		wantStatus int
	}{
		{
			name: "Update Organization name",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				OrganizationsStore: &mocks.OrganizationsStore{
					DeleteF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						switch *q.ID {
						case "1337":
							return &chronograf.Organization{
								ID:   "1337",
								Name: "The Good Place",
							}, nil
						default:
							return nil, fmt.Errorf("organization with ID %s not found", *q.ID)
						}
					},
				},
			},
			id:         "1337",
			wantStatus: http.StatusNoContent,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					OrganizationsStore: tt.fields.OrganizationsStore,
				},
				Logger: tt.fields.Logger,
			}
			// Inject the :oid route parameter that the router would supply.
			tt.args.r = tt.args.r.WithContext(httprouter.WithParams(context.Background(),
				httprouter.Params{
					{
						Key:   "oid",
						Value: tt.id,
					},
				}))
			s.RemoveOrganization(tt.args.w, tt.args.r)

			resp := tt.args.w.Result()
			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
		})
	}
}
// TestService_NewOrganization exercises the create handler: success, missing
// org name (422), missing user on context (500 + cleanup), and failure to
// add the creating user to the new organization (500 + cleanup).
func TestService_NewOrganization(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
		UsersStore         chronograf.UsersStore
		Logger             chronograf.Logger
	}
	type args struct {
		w    *httptest.ResponseRecorder
		r    *http.Request
		org  *organizationRequest
		user *chronograf.User
	}
	tests := []struct {
		name            string
		fields          fields
		args            args
		wantStatus      int
		wantContentType string
		wantBody        string
	}{
		{
			name: "Create Organization",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				user: &chronograf.User{
					ID:       1,
					Name:     "bobetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				org: &organizationRequest{
					Name: "The Good Place",
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				UsersStore: &mocks.UsersStore{
					AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
						return &chronograf.User{
							ID:       1,
							Name:     "bobetta",
							Provider: "github",
							Scheme:   "oauth2",
						}, nil
					},
				},
				OrganizationsStore: &mocks.OrganizationsStore{
					AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:   "1337",
							Name: "The Good Place",
						}, nil
					},
				},
			},
			wantStatus:      http.StatusCreated,
			wantContentType: "application/json",
			wantBody:        `{"id":"1337","name":"The Good Place","links":{"self":"/chronograf/v1/organizations/1337"}}`,
		},
		{
			name: "Fail to create Organization - no org name",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				user: &chronograf.User{
					ID:       1,
					Name:     "bobetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				org: &organizationRequest{},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				UsersStore: &mocks.UsersStore{
					AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
						return &chronograf.User{
							ID:       1,
							Name:     "bobetta",
							Provider: "github",
							Scheme:   "oauth2",
						}, nil
					},
				},
				OrganizationsStore: &mocks.OrganizationsStore{
					AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {
						return nil, nil
					},
				},
			},
			wantStatus:      http.StatusUnprocessableEntity,
			wantContentType: "application/json",
			wantBody:        `{"code":422,"message":"name required on Chronograf Organization request body"}`,
		},
		{
			name: "Create Organization - no user on context",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{
					Name: "The Good Place",
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				UsersStore: &mocks.UsersStore{
					AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
						return &chronograf.User{
							ID:       1,
							Name:     "bobetta",
							Provider: "github",
							Scheme:   "oauth2",
						}, nil
					},
				},
				OrganizationsStore: &mocks.OrganizationsStore{
					AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:   "1337",
							Name: "The Good Place",
						}, nil
					},
					// DeleteF backs the handler's best-effort cleanup path.
					DeleteF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
				},
			},
			wantStatus:      http.StatusInternalServerError,
			wantContentType: "application/json",
			wantBody:        `{"code":500,"message":"failed to retrieve user from context"}`,
		},
		{
			name: "Create Organization - failed to add user to organization",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"GET",
					"http://any.url", // can be any valid URL as we are bypassing mux
					nil,
				),
				org: &organizationRequest{
					Name: "The Good Place",
				},
				user: &chronograf.User{
					ID:       1,
					Name:     "bobetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
			},
			fields: fields{
				Logger: &chronograf.NoopLogger{},
				UsersStore: &mocks.UsersStore{
					AddF: func(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
						return nil, fmt.Errorf("failed to add user to org")
					},
				},
				OrganizationsStore: &mocks.OrganizationsStore{
					AddF: func(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:   "1337",
							Name: "The Good Place",
						}, nil
					},
					DeleteF: func(ctx context.Context, o *chronograf.Organization) error {
						return nil
					},
				},
			},
			wantStatus:      http.StatusInternalServerError,
			wantContentType: "application/json",
			wantBody:        `{"code":500,"message":"failed to add user to organization"}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &Service{
				Store: &mocks.Store{
					OrganizationsStore: tt.fields.OrganizationsStore,
					UsersStore:         tt.fields.UsersStore,
				},
				Logger: tt.fields.Logger,
			}
			// Place the requesting user (possibly nil) on the context and
			// supply the JSON request body.
			ctx := tt.args.r.Context()
			ctx = context.WithValue(ctx, UserContextKey, tt.args.user)
			tt.args.r = tt.args.r.WithContext(ctx)

			buf, _ := json.Marshal(tt.args.org)
			tt.args.r.Body = ioutil.NopCloser(bytes.NewReader(buf))
			s.NewOrganization(tt.args.w, tt.args.r)

			resp := tt.args.w.Result()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)

			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
			if tt.wantContentType != "" && content != tt.wantContentType {
				t.Errorf("%q. NewOrganization() = %v, want %v", tt.name, content, tt.wantContentType)
			}
			if eq, _ := jsonEqual(string(body), tt.wantBody); tt.wantBody != "" && !eq {
				t.Errorf("%q. NewOrganization() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody)
			}
		})
	}
}

View File

@ -1,10 +0,0 @@
package server
import "net/url"
// PathEscape escapes the string so it can be safely placed inside a URL path
// segment.
//
// NOTE(review): url.PathEscape is NOT a drop-in replacement — per the net/url
// docs it escapes "/" as %2F, while URL.String leaves path separators intact.
func PathEscape(str string) string {
	escaped := url.URL{Path: str}
	return escaped.String()
}

View File

@ -1,55 +0,0 @@
package server
import (
"fmt"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Permissions returns all possible permissions for this source.
func (s *Service) Permissions(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}

	src, err := s.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, s.Logger)
		return
	}

	ts, err := s.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, s.Logger)
		return
	}

	// Permissions does not return an error. A stale `if err != nil` check
	// used to follow this call; err was provably nil at that point (the
	// Connect error already returned above), so the branch was dead code and
	// has been removed.
	perms := ts.Permissions(ctx)

	httpAPISrcs := "/chronograf/v1/sources"
	res := struct {
		Permissions chronograf.Permissions `json:"permissions"`
		Links       map[string]string      `json:"links"` // Links are URI locations related to user
	}{
		Permissions: perms,
		Links: map[string]string{
			"self":   fmt.Sprintf("%s/%d/permissions", httpAPISrcs, srcID),
			"source": fmt.Sprintf("%s/%d", httpAPISrcs, srcID),
		},
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}

View File

@ -1,114 +0,0 @@
package server
import (
"bytes"
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestService_Permissions exercises Service.Permissions through the HTTP
// layer using mocked stores and time-series clients.
func TestService_Permissions(t *testing.T) {
	type fields struct {
		SourcesStore chronograf.SourcesStore
		TimeSeries   TimeSeriesClient
		Logger       chronograf.Logger
		UseAuth      bool
	}
	type args struct {
		w *httptest.ResponseRecorder
		r *http.Request
	}
	tests := []struct {
		name            string
		fields          fields
		args            args
		ID              string
		wantStatus      int
		wantContentType string
		wantBody        string
	}{
		{
			name: "New user for data source",
			args: args{
				w: httptest.NewRecorder(),
				r: httptest.NewRequest(
					"POST",
					"http://server.local/chronograf/v1/sources/1",
					ioutil.NopCloser(
						bytes.NewReader([]byte(`{"name": "marty", "password": "the_lake"}`)))),
			},
			fields: fields{
				UseAuth: true,
				Logger:  &chronograf.NoopLogger{},
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
						return chronograf.Source{
							ID:       1,
							Name:     "muh source",
							Username: "name",
							Password: "hunter2",
							URL:      "http://localhost:8086",
						}, nil
					},
				},
				TimeSeries: &mocks.TimeSeries{
					ConnectF: func(ctx context.Context, src *chronograf.Source) error {
						return nil
					},
					PermissionsF: func(ctx context.Context) chronograf.Permissions {
						return chronograf.Permissions{
							{
								Scope:   chronograf.AllScope,
								Allowed: chronograf.Allowances{"READ", "WRITE"},
							},
						}
					},
				},
			},
			ID:              "1",
			wantStatus:      http.StatusOK,
			wantContentType: "application/json",
			wantBody: `{"permissions":[{"scope":"all","allowed":["READ","WRITE"]}],"links":{"self":"/chronograf/v1/sources/1/permissions","source":"/chronograf/v1/sources/1"}}
`,
		},
	}
	for _, tt := range tests {
		tt := tt // capture the range variable for the subtest closure
		// Run each case as a named subtest, consistent with TestService_Queries.
		t.Run(tt.name, func(t *testing.T) {
			// Inject the router params the handler reads via paramID.
			tt.args.r = tt.args.r.WithContext(context.WithValue(
				context.TODO(),
				httprouter.ParamsKey,
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.ID,
					},
				}))
			h := &Service{
				Store: &mocks.Store{
					SourcesStore: tt.fields.SourcesStore,
				},
				TimeSeriesClient: tt.fields.TimeSeries,
				Logger:           tt.fields.Logger,
				UseAuth:          tt.fields.UseAuth,
			}
			h.Permissions(tt.args.w, tt.args.r)
			resp := tt.args.w.Result()
			defer resp.Body.Close()
			content := resp.Header.Get("Content-Type")
			body, _ := ioutil.ReadAll(resp.Body)
			if resp.StatusCode != tt.wantStatus {
				t.Errorf("%q. Permissions() = %v, want %v", tt.name, resp.StatusCode, tt.wantStatus)
			}
			if tt.wantContentType != "" && content != tt.wantContentType {
				t.Errorf("%q. Permissions() = %v, want %v", tt.name, content, tt.wantContentType)
			}
			if tt.wantBody != "" && string(body) != tt.wantBody {
				t.Errorf("%q. Permissions() = \n***%v***\n,\nwant\n***%v***", tt.name, string(body), tt.wantBody)
			}
		})
	}
}

View File

@ -1,34 +0,0 @@
package server
import (
"net/http"
)
type flushingResponseWriter struct {
http.ResponseWriter
}
func (f *flushingResponseWriter) WriteHeader(status int) {
f.ResponseWriter.WriteHeader(status)
}
// Flush is here because the underlying HTTP chunked transfer response writer
// to implement http.Flusher. Without it data is silently buffered. This
// was discovered when proxying kapacitor chunked logs.
func (f *flushingResponseWriter) Flush() {
if flusher, ok := f.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
// FlushingHandler may not actually do anything, but it was ostensibly
// implemented to flush response writers that can be flushed for the
// purposes in the comment above.
func FlushingHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
iw := &flushingResponseWriter{
ResponseWriter: w,
}
next.ServeHTTP(iw, r)
})
}

View File

@ -1,121 +0,0 @@
package server
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
)
// Proxy proxies requests to services using the path query parameter.
//
// The upstream kapacitor server is resolved from the "id" (source) and
// "kid" (server) path parameters; the upstream path comes from the "path"
// query parameter. Basic-auth credentials stored on the server record are
// forwarded as a request header.
func (s *Service) Proxy(w http.ResponseWriter, r *http.Request) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	id, err := paramID("kid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	path := r.URL.Query().Get("path")
	if path == "" {
		Error(w, http.StatusUnprocessableEntity, "path query parameter required", s.Logger)
		return
	}
	ctx := r.Context()
	srv, err := s.Store.Servers(ctx).Get(ctx, id)
	// The server must exist AND belong to the requested source.
	if err != nil || srv.SrcID != srcID {
		notFound(w, id, s.Logger)
		return
	}
	// To preserve any HTTP query arguments to the kapacitor path,
	// we concat and parse them into u.
	uri := singleJoiningSlash(srv.URL, path)
	u, err := url.Parse(uri)
	if err != nil {
		msg := fmt.Sprintf("Error parsing kapacitor url: %v", err)
		Error(w, http.StatusUnprocessableEntity, msg, s.Logger)
		return
	}
	director := func(req *http.Request) {
		// Set the Host header of the original Kapacitor URL
		req.Host = u.Host
		// NOTE(review): req.URL is replaced wholesale with u, which is built
		// only from srv.URL and the "path" parameter — query parameters on
		// the incoming request that are not embedded in "path" are dropped.
		// Confirm callers always pass the full target (including query) in
		// the "path" value.
		req.URL = u
		// Because we are acting as a proxy, kapacitor needs to have the basic auth information set as
		// a header directly
		if srv.Username != "" && srv.Password != "" {
			req.SetBasicAuth(srv.Username, srv.Password)
		}
	}
	// Without a FlushInterval the HTTP Chunked response for kapacitor logs is
	// buffered and flushed every 30 seconds.
	proxy := &httputil.ReverseProxy{
		Director:      director,
		FlushInterval: time.Second,
	}
	// The connection to kapacitor is using a self-signed certificate.
	// This modifies uses the same values as http.DefaultTransport but specifies
	// InsecureSkipVerify
	if srv.InsecureSkipVerify {
		proxy.Transport = &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).DialContext,
			MaxIdleConns:          100,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			TLSClientConfig:       &tls.Config{InsecureSkipVerify: true},
		}
	}
	proxy.ServeHTTP(w, r)
}
// ProxyPost proxies POST requests to the service; see Proxy for the
// resolution of the upstream target.
func (s *Service) ProxyPost(w http.ResponseWriter, r *http.Request) {
	s.Proxy(w, r)
}

// ProxyPatch proxies PATCH requests to the service; see Proxy.
func (s *Service) ProxyPatch(w http.ResponseWriter, r *http.Request) {
	s.Proxy(w, r)
}

// ProxyGet proxies GET requests to the service; see Proxy.
func (s *Service) ProxyGet(w http.ResponseWriter, r *http.Request) {
	s.Proxy(w, r)
}

// ProxyDelete proxies DELETE requests to the service; see Proxy.
func (s *Service) ProxyDelete(w http.ResponseWriter, r *http.Request) {
	s.Proxy(w, r)
}
// singleJoiningSlash joins a and b with exactly one "/" between them,
// regardless of whether a ends with, or b begins with, a slash.
func singleJoiningSlash(a, b string) string {
	aHasSlash := strings.HasSuffix(a, "/")
	bHasSlash := strings.HasPrefix(b, "/")
	switch {
	case aHasSlash && bHasSlash:
		// Both sides contribute a slash: drop one.
		return a + b[1:]
	case !aHasSlash && !bHasSlash:
		// Neither side has one: insert it.
		return a + "/" + b
	default:
		// Exactly one side has it already.
		return a + b
	}
}

View File

@ -1,134 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"time"
"golang.org/x/net/context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
"github.com/influxdata/influxdb/v2/chronograf/influx/queries"
)
// QueryRequest is a single InfluxQL query that will be converted to a
// queryConfig.
type QueryRequest struct {
	ID    string `json:"id"`
	Query string `json:"query"`
}

// QueriesRequest converts all queries to queryConfigs with the help
// of the template variables.
type QueriesRequest struct {
	Queries      []QueryRequest           `json:"queries"`
	TemplateVars []chronograf.TemplateVar `json:"tempVars,omitempty"`
}

// QueryResponse is the return result of a QueryRequest including
// the raw query, the templated query, the queryConfig and the queryAST.
type QueryResponse struct {
	Duration       int64                    `json:"durationMs"` // query time range in milliseconds
	ID             string                   `json:"id"`
	Query          string                   `json:"query"`
	QueryConfig    chronograf.QueryConfig   `json:"queryConfig"`
	QueryAST       *queries.SelectStatement `json:"queryAST,omitempty"` // only set when the query parses as a SELECT
	QueryTemplated *string                  `json:"queryTemplated,omitempty"`
}

// QueriesResponse is the response for a QueriesRequest.
type QueriesResponse struct {
	Queries []QueryResponse `json:"queries"`
}
// Queries analyzes InfluxQL to produce front-end friendly QueryConfig.
//
// It loads the source identified by the "id" path parameter, decodes a
// QueriesRequest from the body, and for each query returns its query
// config, its AST (when the statement parses as a SELECT), and the time
// range of the query in milliseconds.
func (s *Service) Queries(w http.ResponseWriter, r *http.Request) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	src, err := s.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, s.Logger)
		return
	}
	var req QueriesRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	res := QueriesResponse{
		Queries: make([]QueryResponse, len(req.Queries)),
	}
	for i, q := range req.Queries {
		qr := QueryResponse{
			ID:    q.ID,
			Query: q.Query,
		}
		qc := ToQueryConfig(q.Query)
		if err := s.DefaultRP(ctx, &qc, &src); err != nil {
			Error(w, http.StatusBadRequest, err.Error(), s.Logger)
			return
		}
		// Shifts must be set before qc is copied into qr.QueryConfig below:
		// the assignment copies the struct by value.
		qc.Shifts = []chronograf.TimeShift{}
		qr.QueryConfig = qc
		// Only statements that parse as SELECT carry an AST; on parse error
		// the AST field is simply omitted.
		if stmt, err := queries.ParseSelect(q.Query); err == nil {
			qr.QueryAST = stmt
		}
		// Report the query's time range in milliseconds, clamped to >= 1ms
		// so a sub-millisecond range is not reported as zero.
		if dur, err := influx.ParseTime(q.Query, time.Now()); err == nil {
			ms := dur.Nanoseconds() / int64(time.Millisecond)
			if ms == 0 {
				ms = 1
			}
			qr.Duration = ms
		}
		qr.QueryConfig.ID = q.ID
		res.Queries[i] = qr
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// DefaultRP fills in qc.RetentionPolicy with the database's default
// retention policy when the caller left it blank. It is a no-op when an RP
// is already set or when the config lacks the database, measurement, and
// fields needed to resolve one.
func (s *Service) DefaultRP(ctx context.Context, qc *chronograf.QueryConfig, src *chronograf.Source) error {
	switch {
	case qc.RetentionPolicy != "":
		// Caller already chose a retention policy.
		return nil
	case qc.Database == "" || qc.Measurement == "" || len(qc.Fields) == 0:
		// Not enough information to look up a default RP.
		return nil
	}
	db := s.Databases
	if err := db.Connect(ctx, src); err != nil {
		return fmt.Errorf("unable to connect to source: %v", err)
	}
	rps, err := db.AllRP(ctx, qc.Database)
	if err != nil {
		return fmt.Errorf("unable to load RPs from DB %s: %v", qc.Database, err)
	}
	for i := range rps {
		if rps[i].Default {
			qc.RetentionPolicy = rps[i].Name
			return nil
		}
	}
	return nil
}

View File

@ -1,112 +0,0 @@
package server
import (
"bytes"
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestService_Queries drives Service.Queries end-to-end over HTTP recorder
// requests, covering malformed JSON, a bad id parameter, a parsable SELECT
// (config + AST + duration), and an unparsable statement that falls back
// to raw text.
func TestService_Queries(t *testing.T) {
	tests := []struct {
		name         string
		SourcesStore chronograf.SourcesStore
		ID           string
		w            *httptest.ResponseRecorder
		r            *http.Request
		want         string
	}{
		{
			name: "bad json",
			SourcesStore: &mocks.SourcesStore{
				GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
					return chronograf.Source{
						ID: ID,
					}, nil
				},
			},
			ID:   "1",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`howdy`))),
			want: `{"code":400,"message":"unparsable JSON"}`,
		},
		{
			// No SourcesStore needed: the handler rejects the id before any lookup.
			name: "bad id",
			ID:   "howdy",
			w:    httptest.NewRecorder(),
			r:    httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte{})),
			want: `{"code":422,"message":"error converting ID howdy"}`,
		},
		{
			name: "query with no template vars",
			SourcesStore: &mocks.SourcesStore{
				GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
					return chronograf.Source{
						ID: ID,
					}, nil
				},
			},
			ID: "1",
			w:  httptest.NewRecorder(),
			r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`{
"queries": [
{
"query": "SELECT \"pingReq\" FROM db.\"monitor\".\"httpd\" WHERE time > now() - 1m",
"id": "82b60d37-251e-4afe-ac93-ca20a3642b11"
}
]}`))),
			want: `{"queries":[{"durationMs":59999,"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SELECT \"pingReq\" FROM db.\"monitor\".\"httpd\" WHERE time \u003e now() - 1m","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"db","measurement":"httpd","retentionPolicy":"monitor","fields":[{"value":"pingReq","type":"field","alias":""}],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":null,"range":{"upper":"","lower":"now() - 1m"},"shifts":[]},"queryAST":{"condition":{"expr":"binary","op":"\u003e","lhs":{"expr":"reference","val":"time"},"rhs":{"expr":"binary","op":"-","lhs":{"expr":"call","name":"now"},"rhs":{"expr":"literal","val":"1m","type":"duration"}}},"fields":[{"column":{"expr":"reference","val":"pingReq"}}],"sources":[{"database":"db","retentionPolicy":"monitor","name":"httpd","type":"measurement"}]}}]}
`,
		},
		{
			name: "query with unparsable query",
			SourcesStore: &mocks.SourcesStore{
				GetF: func(ctx context.Context, ID int) (chronograf.Source, error) {
					return chronograf.Source{
						ID: ID,
					}, nil
				},
			},
			ID: "1",
			w:  httptest.NewRecorder(),
			r: httptest.NewRequest("POST", "/queries", bytes.NewReader([]byte(`{
"queries": [
{
"query": "SHOW DATABASES",
"id": "82b60d37-251e-4afe-ac93-ca20a3642b11"
}
]}`))),
			want: `{"queries":[{"durationMs":0,"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","query":"SHOW DATABASES","queryConfig":{"id":"82b60d37-251e-4afe-ac93-ca20a3642b11","database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SHOW DATABASES","range":null,"shifts":[]}}]}
`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Inject the router params the handler reads via paramID.
			tt.r = tt.r.WithContext(context.WithValue(
				context.TODO(),
				httprouter.ParamsKey,
				httprouter.Params{
					{
						Key:   "id",
						Value: tt.ID,
					},
				}))
			s := &Service{
				Store: &mocks.Store{
					SourcesStore: tt.SourcesStore,
				},
				Logger: &mocks.TestLogger{},
			}
			s.Queries(tt.w, tt.r)
			got := tt.w.Body.String()
			if got != tt.want {
				t.Errorf("got:\n%s\nwant:\n%s\n", got, tt.want)
			}
		})
	}
}

View File

@ -1,51 +0,0 @@
package server
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// ToQueryConfig converts an InfluxQL statement into a chronograf
// QueryConfig. When the statement cannot be fully represented as a query
// config, the returned config carries the raw query text instead.
func ToQueryConfig(query string) chronograf.QueryConfig {
	if qc, err := influx.Convert(query); err == nil {
		return qc
	}
	// Fallback: a raw-text config with empty (non-nil) collections.
	return chronograf.QueryConfig{
		RawText: &query,
		Fields:  []chronograf.Field{},
		GroupBy: chronograf.GroupBy{
			Tags: []string{},
		},
		Tags: make(map[string][]string),
	}
}
// validFieldTypes enumerates the field/arg type strings accepted by
// ValidateQueryConfig.
var validFieldTypes = map[string]bool{
	"func":     true,
	"field":    true,
	"integer":  true,
	"number":   true,
	"regex":    true,
	"wildcard": true,
}

// ValidateQueryConfig checks that every field (and each field's args) in q
// uses a known field type, returning a descriptive error for the first
// invalid type found.
//
// Fix: the error previously always embedded the parent field's type, even
// when the invalid type belonged to a nested arg; it now reports the
// actual offending type, and the error is only constructed when needed.
func ValidateQueryConfig(q *chronograf.QueryConfig) error {
	invalidType := func(typ string) error {
		return fmt.Errorf(`invalid field type "%s" ; expect func, field, integer, number, regex, wildcard`, typ)
	}
	for _, fld := range q.Fields {
		if !validFieldTypes[fld.Type] {
			return invalidType(fld.Type)
		}
		for _, arg := range fld.Args {
			if !validFieldTypes[arg.Type] {
				return invalidType(arg.Type)
			}
		}
	}
	return nil
}

View File

@ -1,50 +0,0 @@
package server
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestValidateQueryConfig covers rejection of unknown field types, both at
// the top level and nested inside a function's arguments.
func TestValidateQueryConfig(t *testing.T) {
	cases := []struct {
		name    string
		q       *chronograf.QueryConfig
		wantErr bool
	}{
		{
			name: "invalid field type",
			q: &chronograf.QueryConfig{
				Fields: []chronograf.Field{
					{
						Type: "invalid",
					},
				},
			},
			wantErr: true,
		},
		{
			name: "invalid field args",
			q: &chronograf.QueryConfig{
				Fields: []chronograf.Field{
					{
						Type: "func",
						Args: []chronograf.Field{
							{
								Type: "invalid",
							},
						},
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidateQueryConfig(tc.q)
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("ValidateQueryConfig() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}

View File

@ -1,39 +0,0 @@
package server
import (
"fmt"
"net/http"
)
const index = `<!DOCTYPE html>
<html>
<head>
<title>Chronograf API</title>
<!-- needed for adaptive design -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!--
ReDoc doesn't change outer page styles
-->
<style>
body {
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<redoc spec-url='%s'></redoc>
<script src="https://rebilly.github.io/ReDoc/releases/latest/redoc.min.js"> </script>
</body>
</html>
`
// Redoc servers the swagger JSON using the redoc package.
func Redoc(swagger string) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write([]byte(fmt.Sprintf(index, swagger)))
})
}

View File

@ -1,122 +0,0 @@
package server
import (
"fmt"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
)
// AuthRoute describes the HTTP routes exposed for a single OAuth2 provider.
type AuthRoute struct {
	Name     string `json:"name"`     // Name uniquely identifies the provider
	Label    string `json:"label"`    // Label is a user-facing string to present in the UI
	Login    string `json:"login"`    // Login is the route to the login redirect path
	Logout   string `json:"logout"`   // Logout is the route to the logout redirect path
	Callback string `json:"callback"` // Callback is the route the provider calls to exchange the code/state
}

// AuthRoutes contains all OAuth2 provider routes.
type AuthRoutes []AuthRoute

// Lookup returns the route whose Name matches provider, plus a bool
// reporting whether such a route exists.
func (r *AuthRoutes) Lookup(provider string) (AuthRoute, bool) {
	for i := range *r {
		if (*r)[i].Name == provider {
			return (*r)[i], true
		}
	}
	return AuthRoute{}, false
}
// getRoutesResponse is the JSON body returned by AllRoutes.ServeHTTP,
// enumerating the top-level endpoints of the Chronograf API.
type getRoutesResponse struct {
	Layouts            string                             `json:"layouts"`          // Location of the layouts endpoint
	Users              string                             `json:"users"`            // Location of the users endpoint
	AllUsers           string                             `json:"allUsers"`         // Location of the raw users endpoint
	Organizations      string                             `json:"organizations"`    // Location of the organizations endpoint
	Mappings           string                             `json:"mappings"`         // Location of the application mappings endpoint
	Sources            string                             `json:"sources"`          // Location of the sources endpoint
	Me                 string                             `json:"me"`               // Location of the me endpoint
	Environment        string                             `json:"environment"`      // Location of the environment endpoint
	Dashboards         string                             `json:"dashboards"`       // Location of the dashboards endpoint
	Config             getConfigLinksResponse             `json:"config"`           // Location of the config endpoint and its various sections
	Cells              string                             `json:"cells"`            // Location of the v2 cells
	DashboardsV2       string                             `json:"dashboardsv2"`     // Location of the v2 dashboards
	Auth               []AuthRoute                        `json:"auth"`             // Location of all auth routes.
	Logout             *string                            `json:"logout,omitempty"` // Location of the logout route for all auth routes
	ExternalLinks      getExternalLinksResponse           `json:"external"`         // All external links for the client to use
	OrganizationConfig getOrganizationConfigLinksResponse `json:"orgConfig"`        // Location of the organization config endpoint
	Flux               getFluxLinksResponse               `json:"flux"`             // Location of the Flux endpoints
}
// AllRoutes is a handler that returns all links to resources in Chronograf server, as well as
// external links for the client to know about, such as for JSON feeds or custom side nav buttons.
// Optionally, routes for authentication can be returned.
type AllRoutes struct {
	GetPrincipal func(r *http.Request) oauth2.Principal // GetPrincipal is used to retrieve the principal on http request.
	AuthRoutes   []AuthRoute                            // Location of all auth routes. If no auth, this can be empty.
	LogoutLink   string                                 // Location of the logout route for all auth routes. If no auth, this can be empty.
	StatusFeed   string                                 // External link to the JSON Feed for the News Feed on the client's Status Page
	CustomLinks  map[string]string                      // Custom external links for client's User menu, as passed in via CLI/ENV
	Logger       chronograf.Logger                      // Logger used for error reporting while serving routes
}
// ServeHTTP returns all top-level routes and external links within
// chronograf as a single JSON document.
func (a *AllRoutes) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	customLinks, err := NewCustomLinks(a.CustomLinks)
	if err != nil {
		Error(w, http.StatusInternalServerError, err.Error(), a.Logger)
		return
	}
	org := "default"
	if a.GetPrincipal != nil {
		// If there is a principal, use the organization to populate the users routes
		// otherwise use the default organization
		if p := a.GetPrincipal(r); p.Organization != "" {
			org = p.Organization
		}
	}
	routes := getRoutesResponse{
		Sources:       "/chronograf/v1/sources",
		Layouts:       "/chronograf/v1/layouts",
		Users:         fmt.Sprintf("/chronograf/v1/organizations/%s/users", org),
		AllUsers:      "/chronograf/v1/users",
		Organizations: "/chronograf/v1/organizations",
		Me:            "/chronograf/v1/me",
		Environment:   "/chronograf/v1/env",
		Mappings:      "/chronograf/v1/mappings",
		Dashboards:    "/chronograf/v1/dashboards",
		DashboardsV2:  "/chronograf/v2/dashboards",
		Cells:         "/chronograf/v2/cells",
		Config: getConfigLinksResponse{
			Self: "/chronograf/v1/config",
			Auth: "/chronograf/v1/config/auth",
		},
		OrganizationConfig: getOrganizationConfigLinksResponse{
			Self:      "/chronograf/v1/org_config",
			LogViewer: "/chronograf/v1/org_config/logviewer",
		},
		Auth: make([]AuthRoute, len(a.AuthRoutes)), // We want to return at least an empty array, rather than null
		ExternalLinks: getExternalLinksResponse{
			StatusFeed:  &a.StatusFeed,
			CustomLinks: customLinks,
		},
		Flux: getFluxLinksResponse{
			Self:        "/chronograf/v1/flux",
			AST:         "/chronograf/v1/flux/ast",
			Suggestions: "/chronograf/v1/flux/suggestions",
		},
	}
	// The JSON response will have no field present for the LogoutLink if there is no logout link.
	if a.LogoutLink != "" {
		routes.Logout = &a.LogoutLink
	}
	// Fill the pre-sized slice so "auth" serializes as [] (never null).
	copy(routes.Auth, a.AuthRoutes)
	encodeJSON(w, http.StatusOK, routes, a.Logger)
}

View File

@ -1,121 +0,0 @@
package server
import (
"encoding/json"
"io/ioutil"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestAllRoutes verifies the default routes payload served by AllRoutes
// when no auth routes, logout link, or external links are configured.
func TestAllRoutes(t *testing.T) {
	logger := &chronograf.NoopLogger{}
	handler := &AllRoutes{
		Logger: logger,
	}
	req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil)
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	resp := w.Result()
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fatal (was Error): the assertions below are meaningless without a body.
		t.Fatal("TestAllRoutes not able to retrieve body")
	}
	var routes getRoutesResponse
	if err := json.Unmarshal(body, &routes); err != nil {
		t.Fatal("TestAllRoutes not able to unmarshal JSON response")
	}
	want := `{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":""},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}}
`
	eq, err := jsonEqual(want, string(body))
	if err != nil {
		t.Fatalf("error decoding json: %v", err)
	}
	if !eq {
		t.Errorf("TestAllRoutes\nwanted\n*%s*\ngot\n*%s*", want, string(body))
	}
}
// TestAllRoutesWithAuth verifies the routes payload when an auth provider
// and a logout link are configured.
func TestAllRoutesWithAuth(t *testing.T) {
	logger := &chronograf.NoopLogger{}
	handler := &AllRoutes{
		AuthRoutes: []AuthRoute{
			{
				Name:     "github",
				Label:    "GitHub",
				Login:    "/oauth/github/login",
				Logout:   "/oauth/github/logout",
				Callback: "/oauth/github/callback",
			},
		},
		LogoutLink: "/oauth/logout",
		Logger:     logger,
	}
	req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil)
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	resp := w.Result()
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fatal (was Error): the assertions below are meaningless without a body.
		t.Fatal("TestAllRoutesWithAuth not able to retrieve body")
	}
	var routes getRoutesResponse
	if err := json.Unmarshal(body, &routes); err != nil {
		t.Fatal("TestAllRoutesWithAuth not able to unmarshal JSON response")
	}
	want := `{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[{"name":"github","label":"GitHub","login":"/oauth/github/login","logout":"/oauth/github/logout","callback":"/oauth/github/callback"}],"logout":"/oauth/logout","external":{"statusFeed":""},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}}
`
	eq, err := jsonEqual(want, string(body))
	if err != nil {
		t.Fatalf("error decoding json: %v", err)
	}
	if !eq {
		t.Errorf("TestAllRoutesWithAuth\nwanted\n*%s*\ngot\n*%s*", want, string(body))
	}
}
// TestAllRoutesWithExternalLinks verifies the routes payload when a status
// feed URL and custom external links are configured.
func TestAllRoutesWithExternalLinks(t *testing.T) {
	statusFeedURL := "http://pineapple.life/feed.json"
	customLinks := map[string]string{
		"cubeapple": "https://cube.apple",
	}
	logger := &chronograf.NoopLogger{}
	handler := &AllRoutes{
		StatusFeed:  statusFeedURL,
		CustomLinks: customLinks,
		Logger:      logger,
	}
	req := httptest.NewRequest("GET", "http://docbrowns-inventions.com", nil)
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	resp := w.Result()
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Fatal (was Error): the assertions below are meaningless without a body.
		t.Fatal("TestAllRoutesWithExternalLinks not able to retrieve body")
	}
	var routes getRoutesResponse
	if err := json.Unmarshal(body, &routes); err != nil {
		t.Fatal("TestAllRoutesWithExternalLinks not able to unmarshal JSON response")
	}
	want := `{"dashboardsv2":"/chronograf/v2/dashboards","orgConfig":{"self":"/chronograf/v1/org_config","logViewer":"/chronograf/v1/org_config/logviewer"},"cells":"/chronograf/v2/cells","layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":"http://pineapple.life/feed.json","custom":[{"name":"cubeapple","url":"https://cube.apple"}]},"flux":{"ast":"/chronograf/v1/flux/ast","self":"/chronograf/v1/flux","suggestions":"/chronograf/v1/flux/suggestions"}}
`
	eq, err := jsonEqual(want, string(body))
	if err != nil {
		t.Fatalf("error decoding json: %v", err)
	}
	if !eq {
		t.Errorf("TestAllRoutesWithExternalLinks\nwanted\n*%s*\ngot\n*%s*", want, string(body))
	}
}

View File

@ -1,572 +0,0 @@
package server
import (
"context"
"crypto/tls"
"fmt"
"log"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path"
"regexp"
"runtime"
"strconv"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt"
idgen "github.com/influxdata/influxdb/v2/chronograf/id"
"github.com/influxdata/influxdb/v2/chronograf/influx"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
client "github.com/influxdata/usage-client/v1"
flags "github.com/jessevdk/go-flags"
"github.com/tylerb/graceful"
bbolt "go.etcd.io/bbolt"
)
var (
startTime time.Time
)
func init() {
startTime = time.Now().UTC()
}
// Server for the chronograf API. Fields double as CLI flags / environment
// variables via go-flags struct tags.
type Server struct {
	// HTTP listener and TLS configuration.
	Host         string         `long:"host" description:"The IP to listen on" default:"0.0.0.0" env:"HOST"`
	Port         int            `long:"port" description:"The port to listen on for insecure connections, defaults to a random value" default:"8888" env:"PORT"`
	PprofEnabled bool           `long:"pprof-enabled" description:"Enable the /debug/pprof/* HTTP routes" env:"PPROF_ENABLED"`
	Cert         flags.Filename `long:"cert" description:"Path to PEM encoded public key certificate. " env:"TLS_CERTIFICATE"`
	Key          flags.Filename `long:"key" description:"Path to private key associated with given certificate. " env:"TLS_PRIVATE_KEY"`

	// Default InfluxDB and Kapacitor connections.
	InfluxDBURL       string `long:"influxdb-url" description:"Location of your InfluxDB instance" env:"INFLUXDB_URL"`
	InfluxDBUsername  string `long:"influxdb-username" description:"Username for your InfluxDB instance" env:"INFLUXDB_USERNAME"`
	InfluxDBPassword  string `long:"influxdb-password" description:"Password for your InfluxDB instance" env:"INFLUXDB_PASSWORD"`
	KapacitorURL      string `long:"kapacitor-url" description:"Location of your Kapacitor instance" env:"KAPACITOR_URL"`
	KapacitorUsername string `long:"kapacitor-username" description:"Username of your Kapacitor instance" env:"KAPACITOR_USERNAME"`
	KapacitorPassword string `long:"kapacitor-password" description:"Password of your Kapacitor instance" env:"KAPACITOR_PASSWORD"`
	NewSources        string `long:"new-sources" description:"Config for adding a new InfluxDB source and Kapacitor server, in JSON as an array of objects, and surrounded by single quotes. E.g. --new-sources='[{\"influxdb\":{\"name\":\"Influx 1\",\"username\":\"user1\",\"password\":\"pass1\",\"url\":\"http://localhost:8086\",\"metaUrl\":\"http://metaurl.com\",\"type\":\"influx-enterprise\",\"insecureSkipVerify\":false,\"default\":true,\"telegraf\":\"telegraf\",\"sharedSecret\":\"cubeapples\"},\"kapacitor\":{\"name\":\"Kapa 1\",\"url\":\"http://localhost:9092\",\"active\":true}}]'" env:"NEW_SOURCES" hidden:"true"`

	// Local storage and canned resources.
	Develop       bool   `short:"d" long:"develop" description:"Run server in develop mode."`
	BoltPath      string `short:"b" long:"bolt-path" description:"Full path to boltDB file (e.g. './chronograf-v1.db')" env:"BOLT_PATH" default:"chronograf-v1.db"`
	CannedPath    string `short:"c" long:"canned-path" description:"Path to directory of pre-canned application layouts (/usr/share/chronograf/canned)" env:"CANNED_PATH" default:"canned"`
	ResourcesPath string `long:"resources-path" description:"Path to directory of pre-canned dashboards, sources, kapacitors, and organizations (/usr/share/chronograf/resources)" env:"RESOURCES_PATH" default:"canned"`

	// Token/JWT authentication settings shared by all OAuth2 providers.
	TokenSecret  string        `short:"t" long:"token-secret" description:"Secret to sign tokens" env:"TOKEN_SECRET"`
	JwksURL      string        `long:"jwks-url" description:"URL that returns OpenID Key Discovery JWKS document." env:"JWKS_URL"`
	UseIDToken   bool          `long:"use-id-token" description:"Enable id_token processing." env:"USE_ID_TOKEN"`
	AuthDuration time.Duration `long:"auth-duration" default:"720h" description:"Total duration of cookie life for authentication (in hours). 0 means authentication expires on browser close." env:"AUTH_DURATION"`

	// OAuth2 provider: GitHub.
	GithubClientID     string   `short:"i" long:"github-client-id" description:"Github Client ID for OAuth 2 support" env:"GH_CLIENT_ID"`
	GithubClientSecret string   `short:"s" long:"github-client-secret" description:"Github Client Secret for OAuth 2 support" env:"GH_CLIENT_SECRET"`
	GithubOrgs         []string `short:"o" long:"github-organization" description:"Github organization user is required to have active membership" env:"GH_ORGS" env-delim:","`

	// OAuth2 provider: Google.
	GoogleClientID     string   `long:"google-client-id" description:"Google Client ID for OAuth 2 support" env:"GOOGLE_CLIENT_ID"`
	GoogleClientSecret string   `long:"google-client-secret" description:"Google Client Secret for OAuth 2 support" env:"GOOGLE_CLIENT_SECRET"`
	GoogleDomains      []string `long:"google-domains" description:"Google email domain user is required to have active membership" env:"GOOGLE_DOMAINS" env-delim:","`
	PublicURL          string   `long:"public-url" description:"Full public URL used to access Chronograf from a web browser. Used for OAuth2 authentication. (http://localhost:8888)" env:"PUBLIC_URL"`

	// OAuth2 provider: Heroku.
	HerokuClientID      string   `long:"heroku-client-id" description:"Heroku Client ID for OAuth 2 support" env:"HEROKU_CLIENT_ID"`
	HerokuSecret        string   `long:"heroku-secret" description:"Heroku Secret for OAuth 2 support" env:"HEROKU_SECRET"`
	HerokuOrganizations []string `long:"heroku-organization" description:"Heroku Organization Memberships a user is required to have for access to Chronograf (comma separated)" env:"HEROKU_ORGS" env-delim:","`

	// OAuth2 provider: generic OpenID-compatible provider.
	GenericName         string   `long:"generic-name" description:"Generic OAuth2 name presented on the login page" env:"GENERIC_NAME"`
	GenericClientID     string   `long:"generic-client-id" description:"Generic OAuth2 Client ID. Can be used own OAuth2 service." env:"GENERIC_CLIENT_ID"`
	GenericClientSecret string   `long:"generic-client-secret" description:"Generic OAuth2 Client Secret" env:"GENERIC_CLIENT_SECRET"`
	GenericScopes       []string `long:"generic-scopes" description:"Scopes requested by provider of web client." default:"user:email" env:"GENERIC_SCOPES" env-delim:","`
	GenericDomains      []string `long:"generic-domains" description:"Email domain users' email address to have (example.com)" env:"GENERIC_DOMAINS" env-delim:","`
	GenericAuthURL      string   `long:"generic-auth-url" description:"OAuth 2.0 provider's authorization endpoint URL" env:"GENERIC_AUTH_URL"`
	GenericTokenURL     string   `long:"generic-token-url" description:"OAuth 2.0 provider's token endpoint URL" env:"GENERIC_TOKEN_URL"`
	GenericAPIURL       string   `long:"generic-api-url" description:"URL that returns OpenID UserInfo compatible information." env:"GENERIC_API_URL"`
	GenericAPIKey       string   `long:"generic-api-key" description:"JSON lookup key into OpenID UserInfo. (Azure should be userPrincipalName)" default:"email" env:"GENERIC_API_KEY"`

	// OAuth2 provider: Auth0.
	Auth0Domain        string   `long:"auth0-domain" description:"Subdomain of auth0.com used for Auth0 OAuth2 authentication" env:"AUTH0_DOMAIN"`
	Auth0ClientID      string   `long:"auth0-client-id" description:"Auth0 Client ID for OAuth2 support" env:"AUTH0_CLIENT_ID"`
	Auth0ClientSecret  string   `long:"auth0-client-secret" description:"Auth0 Client Secret for OAuth2 support" env:"AUTH0_CLIENT_SECRET"`
	Auth0Organizations []string `long:"auth0-organizations" description:"Auth0 organizations permitted to access Chronograf (comma separated)" env:"AUTH0_ORGS" env-delim:","`
	Auth0SuperAdminOrg string   `long:"auth0-superadmin-org" description:"Auth0 organization from which users are automatically granted SuperAdmin status" env:"AUTH0_SUPERADMIN_ORG"`

	// UI customization, reporting, logging, and routing.
	StatusFeedURL          string            `long:"status-feed-url" description:"URL of a JSON Feed to display as a News Feed on the client Status page." default:"https://www.influxdata.com/feed/json" env:"STATUS_FEED_URL"`
	CustomLinks            map[string]string `long:"custom-link" description:"Custom link to be added to the client User menu. Multiple links can be added by using multiple of the same flag with different 'name:url' values, or as an environment variable with comma-separated 'name:url' values. E.g. via flags: '--custom-link=InfluxData:https://www.influxdata.com --custom-link=Chronograf:https://github.com/influxdata/influxdb/chronograf'. E.g. via environment variable: 'export CUSTOM_LINKS=InfluxData:https://www.influxdata.com,Chronograf:https://github.com/influxdata/influxdb/chronograf'" env:"CUSTOM_LINKS" env-delim:","`
	TelegrafSystemInterval time.Duration     `long:"telegraf-system-interval" default:"1m" description:"Duration used in the GROUP BY time interval for the hosts list" env:"TELEGRAF_SYSTEM_INTERVAL"`
	ReportingDisabled      bool              `short:"r" long:"reporting-disabled" description:"Disable reporting of usage stats (os,arch,version,cluster_id,uptime) once every 24hr" env:"REPORTING_DISABLED"`
	LogLevel               string            `short:"l" long:"log-level" value-name:"choice" choice:"debug" choice:"info" choice:"error" default:"info" description:"Set the logging level" env:"LOG_LEVEL"`
	Basepath               string            `short:"p" long:"basepath" description:"A URL path prefix under which all chronograf routes will be mounted. (Note: PREFIX_ROUTES has been deprecated. Now, if basepath is set, all routes will be prefixed with it.)" env:"BASE_PATH"`
	ShowVersion            bool              `short:"v" long:"version" description:"Show Chronograf version info"`

	// Runtime state populated by the server itself (not CLI flags).
	BuildInfo chronograf.BuildInfo
	Listener  net.Listener
	handler   http.Handler
}
// provide wraps a provider/mux pair so the returned function invokes the
// given configuration callback only when ok() reports that the provider is
// fully configured via CLI/env parameters.
func provide(p oauth2.Provider, m oauth2.Mux, ok func() bool) func(func(oauth2.Provider, oauth2.Mux)) {
	return func(configure func(oauth2.Provider, oauth2.Mux)) {
		if !ok() {
			return
		}
		configure(p, m)
	}
}
// UseGithub validates the CLI parameters to enable github oauth support.
// All of TokenSecret, GithubClientID, and GithubClientSecret must be set.
func (s *Server) UseGithub() bool {
	return s.TokenSecret != "" && s.GithubClientID != "" && s.GithubClientSecret != ""
}
// UseGoogle validates the CLI parameters to enable google oauth support.
// Google additionally requires PublicURL because its redirect URL is built
// from it (see googleOAuth).
func (s *Server) UseGoogle() bool {
	return s.TokenSecret != "" && s.GoogleClientID != "" && s.GoogleClientSecret != "" && s.PublicURL != ""
}
// UseHeroku validates the CLI parameters to enable heroku oauth support.
func (s *Server) UseHeroku() bool {
	return s.TokenSecret != "" && s.HerokuClientID != "" && s.HerokuSecret != ""
}
// UseAuth0 validates the CLI parameters to enable Auth0 oauth support.
// NOTE(review): unlike the other Use* predicates, TokenSecret is not
// required here — confirm this asymmetry is intentional.
func (s *Server) UseAuth0() bool {
	return s.Auth0ClientID != "" && s.Auth0ClientSecret != ""
}
// UseGenericOAuth2 validates the CLI parameters to enable generic oauth
// support. The generic provider needs explicit auth and token endpoints in
// addition to the client credentials.
func (s *Server) UseGenericOAuth2() bool {
	return s.TokenSecret != "" && s.GenericClientID != "" &&
		s.GenericClientSecret != "" && s.GenericAuthURL != "" &&
		s.GenericTokenURL != ""
}
// githubOAuth builds the github oauth2 provider, its auth mux, and the
// predicate reporting whether github auth is enabled.
func (s *Server) githubOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) {
	provider := oauth2.Github{
		ClientID:     s.GithubClientID,
		ClientSecret: s.GithubClientSecret,
		Orgs:         s.GithubOrgs,
		Logger:       logger,
	}
	tokenizer := oauth2.NewJWT(s.TokenSecret, s.JwksURL)
	mux := oauth2.NewAuthMux(&provider, auth, tokenizer, s.Basepath, logger, s.UseIDToken)
	return &provider, mux, s.UseGithub
}
// googleOAuth builds the google oauth2 provider, its auth mux, and the
// predicate reporting whether google auth is enabled. The redirect URL is
// derived from PublicURL and Basepath.
func (s *Server) googleOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) {
	callback := s.PublicURL + s.Basepath + "/oauth/google/callback"
	provider := oauth2.Google{
		ClientID:     s.GoogleClientID,
		ClientSecret: s.GoogleClientSecret,
		Domains:      s.GoogleDomains,
		RedirectURL:  callback,
		Logger:       logger,
	}
	tokenizer := oauth2.NewJWT(s.TokenSecret, s.JwksURL)
	mux := oauth2.NewAuthMux(&provider, auth, tokenizer, s.Basepath, logger, s.UseIDToken)
	return &provider, mux, s.UseGoogle
}
// herokuOAuth builds the heroku oauth2 provider, its auth mux, and the
// predicate reporting whether heroku auth is enabled.
func (s *Server) herokuOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) {
	provider := oauth2.Heroku{
		ClientID:      s.HerokuClientID,
		ClientSecret:  s.HerokuSecret,
		Organizations: s.HerokuOrganizations,
		Logger:        logger,
	}
	tokenizer := oauth2.NewJWT(s.TokenSecret, s.JwksURL)
	mux := oauth2.NewAuthMux(&provider, auth, tokenizer, s.Basepath, logger, s.UseIDToken)
	return &provider, mux, s.UseHeroku
}
// genericOAuth builds the generic oauth2 provider, its auth mux, and the
// predicate reporting whether generic oauth is enabled.
func (s *Server) genericOAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) {
	provider := oauth2.Generic{
		PageName:       s.GenericName,
		ClientID:       s.GenericClientID,
		ClientSecret:   s.GenericClientSecret,
		RequiredScopes: s.GenericScopes,
		Domains:        s.GenericDomains,
		RedirectURL:    s.genericRedirectURL(),
		AuthURL:        s.GenericAuthURL,
		TokenURL:       s.GenericTokenURL,
		APIURL:         s.GenericAPIURL,
		APIKey:         s.GenericAPIKey,
		Logger:         logger,
	}
	tokenizer := oauth2.NewJWT(s.TokenSecret, s.JwksURL)
	mux := oauth2.NewAuthMux(&provider, auth, tokenizer, s.Basepath, logger, s.UseIDToken)
	return &provider, mux, s.UseGenericOAuth2
}
// auth0OAuth builds the Auth0 oauth2 provider, its auth mux, and the
// predicate reporting whether Auth0 auth is enabled. On any construction
// error it returns a "never enabled" predicate so the provider is skipped.
func (s *Server) auth0OAuth(logger chronograf.Logger, auth oauth2.Authenticator) (oauth2.Provider, oauth2.Mux, func() bool) {
	redirectPath := path.Join(s.Basepath, "oauth", "auth0", "callback")
	redirectURL, err := url.Parse(s.PublicURL)
	if err != nil {
		logger.Error("Error parsing public URL: err:", err)
		return &oauth2.Auth0{}, &oauth2.AuthMux{}, func() bool { return false }
	}
	redirectURL.Path = redirectPath
	auth0, err := oauth2.NewAuth0(s.Auth0Domain, s.Auth0ClientID, s.Auth0ClientSecret, redirectURL.String(), s.Auth0Organizations, logger)
	jwt := oauth2.NewJWT(s.TokenSecret, s.JwksURL)
	// NOTE(review): the mux is built before NewAuth0's error is checked; on
	// error the (zero-valued) provider/mux are still returned, but disabled.
	genMux := oauth2.NewAuthMux(&auth0, auth, jwt, s.Basepath, logger, s.UseIDToken)
	if err != nil {
		logger.Error("Error parsing Auth0 domain: err:", err)
		return &auth0, genMux, func() bool { return false }
	}
	return &auth0, genMux, s.UseAuth0
}
// genericRedirectURL derives the oauth callback URL for the generic provider
// from PublicURL, Basepath, and the (possibly customized) provider name.
// It returns "" when PublicURL is unset or unparsable.
func (s *Server) genericRedirectURL() string {
	if s.PublicURL == "" {
		return ""
	}
	name := "generic"
	if s.GenericName != "" {
		name = s.GenericName
	}
	u, err := url.Parse(s.PublicURL)
	if err != nil {
		return ""
	}
	u.Path = path.Join(u.Path, s.Basepath, "oauth", name, "callback")
	return u.String()
}
// useAuth reports whether any oauth provider is fully configured, which
// turns on authenticated routes throughout the server.
func (s *Server) useAuth() bool {
	return s.UseGithub() || s.UseGoogle() || s.UseHeroku() || s.UseGenericOAuth2() || s.UseAuth0()
}
// useTLS reports whether a TLS certificate was supplied; the key alone is
// not sufficient (see NewListener, which falls back to the cert file).
func (s *Server) useTLS() bool {
	return s.Cert != ""
}
// NewListener opens a plain TCP listener, or a TLS listener when a
// certificate was configured (see useTLS).
func (s *Server) NewListener() (net.Listener, error) {
	addr := net.JoinHostPort(s.Host, strconv.Itoa(s.Port))
	if !s.useTLS() {
		return net.Listen("tcp", addr)
	}
	// No separate key file means the key is assumed to be bundled in the
	// certificate file.
	if s.Key == "" {
		s.Key = s.Cert
	}
	cert, err := tls.LoadX509KeyPair(string(s.Cert), string(s.Key))
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	return tls.Listen("tcp", addr, cfg)
}
// builders bundles the factories that turn on-disk resources (canned
// layouts, dashboards, sources, kapacitors, organizations) into stores.
type builders struct {
	Layouts       LayoutBuilder
	Sources       SourcesBuilder
	Kapacitors    KapacitorBuilder
	Dashboards    DashboardBuilder
	Organizations OrganizationBuilder
}
// newBuilders constructs the resource builders from the server's configured
// paths and InfluxDB/Kapacitor connection settings.
func (s *Server) newBuilders(logger chronograf.Logger) builders {
	return builders{
		Layouts: &MultiLayoutBuilder{
			Logger:     logger,
			UUID:       &idgen.UUID{},
			CannedPath: s.CannedPath,
		},
		Dashboards: &MultiDashboardBuilder{
			Logger: logger,
			ID:     idgen.NewTime(),
			Path:   s.ResourcesPath,
		},
		Sources: &MultiSourceBuilder{
			InfluxDBURL:      s.InfluxDBURL,
			InfluxDBUsername: s.InfluxDBUsername,
			InfluxDBPassword: s.InfluxDBPassword,
			Logger:           logger,
			ID:               idgen.NewTime(),
			Path:             s.ResourcesPath,
		},
		Kapacitors: &MultiKapacitorBuilder{
			KapacitorURL:      s.KapacitorURL,
			KapacitorUsername: s.KapacitorUsername,
			KapacitorPassword: s.KapacitorPassword,
			Logger:            logger,
			ID:                idgen.NewTime(),
			Path:              s.ResourcesPath,
		},
		Organizations: &MultiOrganizationBuilder{
			Logger: logger,
			Path:   s.ResourcesPath,
		},
	}
}
// Serve starts and runs the chronograf server: it validates configuration,
// opens the bolt-backed service, wires up oauth providers and the HTTP mux,
// opens the listener, and blocks serving requests until the server stops.
func (s *Server) Serve(ctx context.Context) error {
	logger := &chronograf.NoopLogger{}
	// Validate custom links up front; the parsed result is discarded here
	// and rebuilt inside NewMux.
	_, err := NewCustomLinks(s.CustomLinks)
	if err != nil {
		logger.
			WithField("component", "server").
			WithField("CustomLink", "invalid").
			Error(err)
		return err
	}
	service := openService(ctx, s.BuildInfo, s.BoltPath, s.newBuilders(logger), logger, s.useAuth())
	service.SuperAdminProviderGroups = superAdminProviderGroups{
		auth0: s.Auth0SuperAdminOrg,
	}
	service.Env = chronograf.Environment{
		TelegrafSystemInterval: s.TelegrafSystemInterval,
	}
	if !validBasepath(s.Basepath) {
		err := fmt.Errorf("invalid basepath, must follow format \"/mybasepath\"")
		logger.
			WithField("component", "server").
			WithField("basepath", "invalid").
			Error(err)
		return err
	}
	// Each provide(...) call wraps one oauth provider; disabled providers
	// become no-ops (see provide).
	providerFuncs := []func(func(oauth2.Provider, oauth2.Mux)){}
	auth := oauth2.NewCookieJWT(s.TokenSecret, s.AuthDuration)
	providerFuncs = append(providerFuncs, provide(s.githubOAuth(logger, auth)))
	providerFuncs = append(providerFuncs, provide(s.googleOAuth(logger, auth)))
	providerFuncs = append(providerFuncs, provide(s.herokuOAuth(logger, auth)))
	providerFuncs = append(providerFuncs, provide(s.genericOAuth(logger, auth)))
	providerFuncs = append(providerFuncs, provide(s.auth0OAuth(logger, auth)))
	s.handler = NewMux(MuxOpts{
		Develop:       s.Develop,
		Auth:          auth,
		Logger:        logger,
		UseAuth:       s.useAuth(),
		ProviderFuncs: providerFuncs,
		Basepath:      s.Basepath,
		StatusFeedURL: s.StatusFeedURL,
		CustomLinks:   s.CustomLinks,
	}, service)
	// Add chronograf's version header to all requests
	s.handler = Version(s.BuildInfo.Version, s.handler)
	if s.useTLS() {
		// Add HSTS to instruct all browsers to change from http to https
		s.handler = HSTS(s.handler)
	}
	listener, err := s.NewListener()
	if err != nil {
		logger.
			WithField("component", "server").
			Error(err)
		return err
	}
	s.Listener = listener
	// Using a log writer for http server logging
	w := logger.Writer()
	defer w.Close()
	stdLog := log.New(w, "", 0)
	// TODO: Remove graceful when changing to go 1.8
	httpServer := &graceful.Server{
		Server: &http.Server{
			ErrorLog: stdLog,
			Handler:  s.handler,
		},
		Logger:       stdLog,
		TCPKeepAlive: 5 * time.Second,
	}
	httpServer.SetKeepAlivesEnabled(true)
	if !s.ReportingDisabled {
		// Background usage reporting; runs for the life of the process.
		go reportUsageStats(s.BuildInfo, logger)
	}
	scheme := "http"
	if s.useTLS() {
		scheme = "https"
	}
	logger.
		WithField("component", "server").
		Info("Serving chronograf at ", scheme, "://", s.Listener.Addr())
	// Blocks until the listener is closed or serving fails.
	if err := httpServer.Serve(s.Listener); err != nil {
		logger.
			WithField("component", "server").
			Error(err)
		return err
	}
	logger.
		WithField("component", "server").
		Info("Stopped serving chronograf at ", scheme, "://", s.Listener.Addr())
	return nil
}
// NewServiceV2 constructs a Service backed directly (without organization
// scoping — see DirectStore) by the stores of an already-opened bbolt
// database handle.
func NewServiceV2(ctx context.Context, d *bbolt.DB) (*Service, error) {
	db := bolt.NewClient()
	db.WithDB(d)
	if err := db.Open(ctx, nil, chronograf.BuildInfo{}); err != nil {
		return nil, err
	}
	logger := &chronograf.NoopLogger{}
	return &Service{
		TimeSeriesClient: &InfluxClient{},
		Store: &DirectStore{
			LayoutsStore:            db.LayoutsStore,
			DashboardsStore:         db.DashboardsStore,
			SourcesStore:            db.SourcesStore,
			ServersStore:            db.ServersStore,
			OrganizationsStore:      db.OrganizationsStore,
			UsersStore:              db.UsersStore,
			ConfigStore:             db.ConfigStore,
			MappingsStore:           db.MappingsStore,
			OrganizationConfigStore: db.OrganizationConfigStore,
		},
		// TODO(desa): what to do about logger
		Logger: logger,
		Databases: &influx.Client{
			Logger: logger,
		},
	}, nil
}
// openService opens the bolt database at boltPath, runs each resource
// builder over the corresponding bolt store, and assembles the resulting
// Service. Any failure is fatal: the process exits with status 1.
func openService(ctx context.Context, buildInfo chronograf.BuildInfo, boltPath string, builder builders, logger chronograf.Logger, useAuth bool) Service {
	db := bolt.NewClient()
	db.Path = boltPath
	if err := db.Open(ctx, logger, buildInfo, bolt.WithBackup()); err != nil {
		logger.
			WithField("component", "boltstore").
			Error(err)
		os.Exit(1)
	}
	layouts, err := builder.Layouts.Build(db.LayoutsStore)
	if err != nil {
		logger.
			WithField("component", "LayoutsStore").
			Error("Unable to construct a MultiLayoutsStore", err)
		os.Exit(1)
	}
	dashboards, err := builder.Dashboards.Build(db.DashboardsStore)
	if err != nil {
		logger.
			WithField("component", "DashboardsStore").
			Error("Unable to construct a MultiDashboardsStore", err)
		os.Exit(1)
	}
	sources, err := builder.Sources.Build(db.SourcesStore)
	if err != nil {
		logger.
			WithField("component", "SourcesStore").
			Error("Unable to construct a MultiSourcesStore", err)
		os.Exit(1)
	}
	kapacitors, err := builder.Kapacitors.Build(db.ServersStore)
	if err != nil {
		logger.
			WithField("component", "KapacitorStore").
			Error("Unable to construct a MultiKapacitorStore", err)
		os.Exit(1)
	}
	organizations, err := builder.Organizations.Build(db.OrganizationsStore)
	if err != nil {
		logger.
			WithField("component", "OrganizationsStore").
			Error("Unable to construct a MultiOrganizationStore", err)
		os.Exit(1)
	}
	return Service{
		TimeSeriesClient: &InfluxClient{},
		Store: &Store{
			LayoutsStore:            layouts,
			DashboardsStore:         dashboards,
			SourcesStore:            sources,
			ServersStore:            kapacitors,
			OrganizationsStore:      organizations,
			UsersStore:              db.UsersStore,
			ConfigStore:             db.ConfigStore,
			MappingsStore:           db.MappingsStore,
			OrganizationConfigStore: db.OrganizationConfigStore,
		},
		Logger:    logger,
		UseAuth:   useAuth,
		Databases: &influx.Client{Logger: logger},
	}
}
// reportUsageStats starts periodic server reporting: it sends one usage
// report immediately, then one every 24 hours. This function never returns
// and is intended to run in its own goroutine (see Serve).
func reportUsageStats(bi chronograf.BuildInfo, logger chronograf.Logger) {
	rand.Seed(time.Now().UTC().UnixNano())
	// Random, non-persistent per-process identifier for this server.
	serverID := strconv.FormatUint(uint64(rand.Int63()), 10)
	reporter := client.New("")
	values := client.Values{
		"os":         runtime.GOOS,
		"arch":       runtime.GOARCH,
		"version":    bi.Version,
		"cluster_id": serverID,
		"uptime":     time.Since(startTime).Seconds(),
	}
	l := logger.WithField("component", "usage").
		WithField("reporting_addr", reporter.URL).
		WithField("freq", "24h").
		WithField("stats", "os,arch,version,cluster_id,uptime")
	l.Info("Reporting usage stats")
	// Best-effort: the initial report's error is deliberately ignored.
	_, _ = reporter.Save(clientUsage(values))
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for {
		<-ticker.C
		values["uptime"] = time.Since(startTime).Seconds()
		l.Debug("Reporting usage stats")
		go reporter.Save(clientUsage(values))
	}
}
// clientUsage wraps the given values in a single-entry usage report for the
// "chronograf-ng" product.
func clientUsage(values client.Values) *client.Usage {
	data := []client.UsageData{{Values: values}}
	return &client.Usage{
		Product: "chronograf-ng",
		Data:    data,
	}
}
// basepathRe matches one or more slash-prefixed segments ("/seg1/seg-2"),
// where a segment is one or more word characters or hyphens. Compiled once
// at package init instead of on every call.
var basepathRe = regexp.MustCompile(`(/[\w-]+)+`)

// validBasepath reports whether basepath is empty or consists entirely of
// slash-prefixed segments like "/mybasepath": stripping every match of
// basepathRe must leave nothing behind, so "russ" (no slash) and "//russ//"
// (doubled/trailing slashes) are rejected.
func validBasepath(basepath string) bool {
	return basepathRe.ReplaceAllLiteralString(basepath, "") == ""
}

View File

@ -1,75 +0,0 @@
package server
import (
"context"
"net/http"
"testing"
"github.com/bouk/httprouter"
)
// WithContext is a helper function to cut down on boilerplate in server test
// files: it attaches the key/value pairs in kv to the request's context as
// httprouter params.
func WithContext(ctx context.Context, r *http.Request, kv map[string]string) *http.Request {
	params := make(httprouter.Params, 0, len(kv))
	for key, value := range kv {
		params = append(params, httprouter.Param{Key: key, Value: value})
	}
	return r.WithContext(httprouter.WithParams(ctx, params))
}
// Test_validBasepath covers empty, simple, multi-segment, slash-less, and
// doubled-slash basepaths.
func Test_validBasepath(t *testing.T) {
	cases := []struct {
		name     string
		basepath string
		want     bool
	}{
		{name: "Basepath can be empty", basepath: "", want: true},
		{name: "Basepath is not empty and valid", basepath: "/russ", want: true},
		{name: "Basepath can include numbers, hyphens, and underscores", basepath: "/3shishka-bob/-rus4s_rus-1_s-", want: true},
		{name: "Basepath is not empty and invalid - no slashes", basepath: "russ", want: false},
		{name: "Basepath is not empty and invalid - extra slashes", basepath: "//russ//", want: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := validBasepath(tc.basepath); got != tc.want {
				t.Errorf("validBasepath() = %v, want %v", got, tc.want)
			}
		})
	}
}

View File

@ -1,60 +0,0 @@
package server
import (
"context"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/enterprise"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// Service handles REST calls to the persistence
type Service struct {
	Store                    DataStore              // backing stores for all resources
	TimeSeriesClient         TimeSeriesClient       // factory for time-series DB clients
	Logger                   chronograf.Logger
	UseAuth                  bool                   // whether authenticated routes are enabled
	SuperAdminProviderGroups superAdminProviderGroups // provider groups whose members become SuperAdmin
	Env                      chronograf.Environment
	Databases                chronograf.Databases
}
// superAdminProviderGroups names, per oauth provider, the group/organization
// whose members are automatically granted SuperAdmin status.
type superAdminProviderGroups struct {
	auth0 string // Auth0 organization name
}
// TimeSeriesClient returns the correct client for a time series database.
type TimeSeriesClient interface {
	// New builds a TimeSeries client connected to the given source.
	New(chronograf.Source, chronograf.Logger) (chronograf.TimeSeries, error)
}
// ErrorMessage is the error response format for all service errors
type ErrorMessage struct {
	Code    int    `json:"code"`    // HTTP status code of the error
	Message string `json:"message"` // human-readable description
}
// TimeSeries returns a new client connected to a time series database
// appropriate for src, delegating to the service's TimeSeriesClient factory.
func (s *Service) TimeSeries(src chronograf.Source) (chronograf.TimeSeries, error) {
	return s.TimeSeriesClient.New(src, s.Logger)
}
// InfluxClient is a TimeSeriesClient that connects to InfluxDB OSS or
// Enterprise depending on the source's type.
type InfluxClient struct{}
// New creates a client to connect to OSS or enterprise. For Enterprise
// sources with a meta URL, the returned client fans queries out through the
// enterprise meta service; otherwise the plain influx client is returned.
func (c *InfluxClient) New(src chronograf.Source, logger chronograf.Logger) (chronograf.TimeSeries, error) {
	client := &influx.Client{
		Logger: logger,
	}
	if err := client.Connect(context.TODO(), &src); err != nil {
		return nil, err
	}
	if src.Type == chronograf.InfluxEnterprise && src.MetaURL != "" {
		// Infer TLS from the meta URL's scheme rather than a separate flag.
		tls := strings.Contains(src.MetaURL, "https")
		insecure := src.InsecureSkipVerify
		return enterprise.NewClientWithTimeSeries(logger, src.MetaURL, influx.DefaultAuthorization(&src), tls, insecure, client)
	}
	return client, nil
}

View File

@ -1,352 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/flux"
)
// postServiceRequest is the JSON body accepted by NewService when creating a
// service attached to a source. See Valid for the required-field rules.
type postServiceRequest struct {
	Name               *string                `json:"name"`               // User facing name of service instance.; Required: true
	URL                *string                `json:"url"`                // URL for the service backend (e.g. http://localhost:9092);/ Required: true
	Type               *string                `json:"type"`               // Type is the kind of service (e.g. flux); Required
	Username           string                 `json:"username,omitempty"` // Username for authentication to service
	Password           string                 `json:"password,omitempty"` // Password for authentication to service
	InsecureSkipVerify bool                   `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted.
	Organization       string                 `json:"organization"`       // Organization is the organization ID that resource belongs to
	Metadata           map[string]interface{} `json:"metadata"`           // Metadata is any other data that the frontend wants to store about this service
}
// Valid checks the POST body: name, url, and type are required; the
// organization defaults to defaultOrgID when unset; and the URL must parse
// with an explicit scheme. It mutates p.Organization as a side effect.
func (p *postServiceRequest) Valid(defaultOrgID string) error {
	if p.Name == nil || p.URL == nil {
		return fmt.Errorf("name and url required")
	}
	if p.Type == nil {
		return fmt.Errorf("type required")
	}
	if p.Organization == "" {
		p.Organization = defaultOrgID
	}
	// Named u (not url) so the net/url package is not shadowed.
	u, err := url.ParseRequestURI(*p.URL)
	if err != nil {
		return fmt.Errorf("invalid source URI: %v", err)
	}
	if len(u.Scheme) == 0 {
		return fmt.Errorf("invalid URL; no URL scheme defined")
	}
	return nil
}
// serviceLinks are the hypermedia links returned with each service resource.
type serviceLinks struct {
	Proxy  string `json:"proxy"`  // URL location of proxy endpoint for this source
	Self   string `json:"self"`   // Self link mapping to this resource
	Source string `json:"source"` // URL location of the parent source
}
// service is the REST representation of a chronograf.Server whose Type is
// non-empty (i.e. a service such as flux, as opposed to a kapacitor).
type service struct {
	ID                 int                    `json:"id,string"`          // Unique identifier representing a service instance.
	SrcID              int                    `json:"sourceID,string"`    // SrcID of the data source
	Name               string                 `json:"name"`               // User facing name of service instance.
	URL                string                 `json:"url"`                // URL for the service backend (e.g. http://localhost:9092)
	Username           string                 `json:"username,omitempty"` // Username for authentication to service
	Password           string                 `json:"password,omitempty"` // Password for authentication to service
	InsecureSkipVerify bool                   `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted.
	Type               string                 `json:"type"`               // Type is the kind of service (e.g. flux)
	Metadata           map[string]interface{} `json:"metadata"`           // Metadata is any other data that the frontend wants to store about this service
	Links              serviceLinks           `json:"links"`              // Links are URI locations related to service
}
// newService converts a chronograf.Server into its REST representation,
// filling in hypermedia links and guaranteeing a non-nil metadata map.
// Note that the stored password is deliberately not echoed back.
func newService(srv chronograf.Server) service {
	meta := srv.Metadata
	if meta == nil {
		meta = make(map[string]interface{})
	}
	const base = "/chronograf/v1/sources"
	return service{
		ID:                 srv.ID,
		SrcID:              srv.SrcID,
		Name:               srv.Name,
		Username:           srv.Username,
		URL:                srv.URL,
		InsecureSkipVerify: srv.InsecureSkipVerify,
		Type:               srv.Type,
		Metadata:           meta,
		Links: serviceLinks{
			Self:   fmt.Sprintf("%s/%d/services/%d", base, srv.SrcID, srv.ID),
			Source: fmt.Sprintf("%s/%d", base, srv.SrcID),
			Proxy:  fmt.Sprintf("%s/%d/services/%d/proxy", base, srv.SrcID, srv.ID),
		},
	}
}
// services is the JSON envelope returned by the Services list endpoint.
type services struct {
	Services []service `json:"services"`
}
// NewService adds a valid service to the store for the source identified by
// the "id" route parameter. Flux services are pinged before being stored;
// an unreachable flux backend yields 504.
func (s *Service) NewService(w http.ResponseWriter, r *http.Request) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	// The parent source must exist before a service can be attached to it.
	_, err = s.Store.Sources(ctx).Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, s.Logger)
		return
	}
	var req postServiceRequest
	if err = json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	defaultOrg, err := s.Store.Organizations(ctx).DefaultOrganization(ctx)
	if err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	// Valid also defaults req.Organization to the default org's ID.
	if err := req.Valid(defaultOrg.ID); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	if req.Type != nil && req.URL != nil && *req.Type == "flux" {
		err := pingFlux(ctx, *req.URL, req.InsecureSkipVerify)
		if err != nil {
			msg := fmt.Sprintf("Unable to reach flux %s: %v", *req.URL, err)
			Error(w, http.StatusGatewayTimeout, msg, s.Logger)
			return
		}
	}
	srv := chronograf.Server{
		SrcID:              srcID,
		Name:               *req.Name,
		Username:           req.Username,
		Password:           req.Password,
		InsecureSkipVerify: req.InsecureSkipVerify,
		URL:                *req.URL,
		Organization:       req.Organization,
		Type:               *req.Type,
		Metadata:           req.Metadata,
	}
	if srv, err = s.Store.Servers(ctx).Add(ctx, srv); err != nil {
		msg := fmt.Errorf("error storing service %v: %v", req, err)
		unknownErrorWithMessage(w, msg, s.Logger)
		return
	}
	res := newService(srv)
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusCreated, res, s.Logger)
}
// Services lists every service attached to the source identified by the
// "id" route parameter. Servers with an empty Type are kapacitors, not
// services, and are filtered out.
func (s *Service) Services(w http.ResponseWriter, r *http.Request) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	all, err := s.Store.Servers(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusInternalServerError, "Error loading services", s.Logger)
		return
	}
	// Non-nil so the JSON response is [] rather than null when empty.
	matching := []service{}
	for _, candidate := range all {
		if candidate.SrcID != srcID || candidate.Type == "" {
			continue
		}
		matching = append(matching, newService(candidate))
	}
	encodeJSON(w, http.StatusOK, services{Services: matching}, s.Logger)
}
// ServiceID retrieves the single service identified by the "kid" route
// parameter, verifying that it belongs to the source in "id" and that it is
// a service (non-empty Type) rather than a kapacitor.
func (s *Service) ServiceID(w http.ResponseWriter, r *http.Request) {
	kid, err := paramID("kid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	svc, err := s.Store.Servers(ctx).Get(ctx, kid)
	if err != nil || svc.SrcID != srcID || svc.Type == "" {
		notFound(w, kid, s.Logger)
		return
	}
	encodeJSON(w, http.StatusOK, newService(svc), s.Logger)
}
// RemoveService deletes the service identified by the "kid" route parameter
// after verifying it belongs to the source in "id" and has a non-empty Type.
// Responds 204 on success.
func (s *Service) RemoveService(w http.ResponseWriter, r *http.Request) {
	kid, err := paramID("kid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	svc, err := s.Store.Servers(ctx).Get(ctx, kid)
	if err != nil || svc.SrcID != srcID || svc.Type == "" {
		notFound(w, kid, s.Logger)
		return
	}
	if err := s.Store.Servers(ctx).Delete(ctx, svc); err != nil {
		unknownErrorWithMessage(w, err, s.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// patchServiceRequest is the JSON body accepted by UpdateService; every
// field is optional and only non-nil fields are applied.
type patchServiceRequest struct {
	Name               *string                 `json:"name,omitempty"`     // User facing name of service instance.
	Type               *string                 `json:"type,omitempty"`     // Type is the kind of service (e.g. flux)
	URL                *string                 `json:"url,omitempty"`      // URL for the service
	Username           *string                 `json:"username,omitempty"` // Username for service auth
	Password           *string                 `json:"password,omitempty"` // Password for service auth
	InsecureSkipVerify *bool                   `json:"insecureSkipVerify"` // InsecureSkipVerify as true means any certificate presented by the service is accepted.
	Metadata           *map[string]interface{} `json:"metadata"`           // Metadata is any other data that the frontend wants to store about this service
}
// Valid checks the PATCH body: a supplied URL must parse with an explicit
// scheme, and a supplied type must not be the empty string. Absent (nil)
// fields are not validated.
func (p *patchServiceRequest) Valid() error {
	if p.URL != nil {
		// Named u (not url) so the net/url package is not shadowed.
		u, err := url.ParseRequestURI(*p.URL)
		if err != nil {
			return fmt.Errorf("invalid service URI: %v", err)
		}
		if len(u.Scheme) == 0 {
			return fmt.Errorf("invalid URL; no URL scheme defined")
		}
	}
	if p.Type != nil && *p.Type == "" {
		return fmt.Errorf("invalid type; type must not be an empty string")
	}
	return nil
}
// UpdateService incrementally updates a service definition in the store:
// only fields present in the PATCH body are applied. If the (possibly
// updated) service is a flux service, the backend is pinged before saving;
// an unreachable flux backend yields 504.
func (s *Service) UpdateService(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("kid", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	srv, err := s.Store.Servers(ctx).Get(ctx, id)
	// Must exist, belong to this source, and be a service (non-empty Type).
	if err != nil || srv.SrcID != srcID || srv.Type == "" {
		notFound(w, id, s.Logger)
		return
	}
	var req patchServiceRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.Valid(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	// Apply only the fields that were present in the request body.
	if req.Name != nil {
		srv.Name = *req.Name
	}
	if req.Type != nil {
		srv.Type = *req.Type
	}
	if req.URL != nil {
		srv.URL = *req.URL
	}
	if req.Password != nil {
		srv.Password = *req.Password
	}
	if req.Username != nil {
		srv.Username = *req.Username
	}
	if req.InsecureSkipVerify != nil {
		srv.InsecureSkipVerify = *req.InsecureSkipVerify
	}
	if req.Metadata != nil {
		srv.Metadata = *req.Metadata
	}
	if srv.Type == "flux" {
		err := pingFlux(ctx, srv.URL, srv.InsecureSkipVerify)
		if err != nil {
			msg := fmt.Sprintf("Unable to reach flux %s: %v", srv.URL, err)
			Error(w, http.StatusGatewayTimeout, msg, s.Logger)
			return
		}
	}
	if err := s.Store.Servers(ctx).Update(ctx, srv); err != nil {
		msg := fmt.Sprintf("Error updating service ID %d", id)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	res := newService(srv)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// pingFlux verifies that a flux service is reachable at address, returning
// an error when the address is not a valid URI or the ping fails.
func pingFlux(ctx context.Context, address string, insecureSkipVerify bool) error {
	// Named u (not url) so the net/url package is not shadowed.
	u, err := url.ParseRequestURI(address)
	if err != nil {
		return fmt.Errorf("invalid service URI: %v", err)
	}
	client := &flux.Client{
		URL:                u,
		InsecureSkipVerify: insecureSkipVerify,
	}
	return client.Ping(ctx)
}

View File

@ -1,289 +0,0 @@
package server
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/noop"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// hasOrganizationContext retrieves the organization ID stored on ctx under
// organizations.ContextKey. It reports false for a nil context, a missing or
// non-string value, or an empty ID.
func hasOrganizationContext(ctx context.Context) (string, bool) {
	// prevents panic in case of nil context
	if ctx == nil {
		return "", false
	}
	if orgID, ok := ctx.Value(organizations.ContextKey).(string); ok && orgID != "" {
		return orgID, true
	}
	return "", false
}
// hasRoleContext retrieves the role name stored on ctx under
// roles.ContextKey, reporting true only for one of the four recognized role
// names (member, viewer, editor, admin).
func hasRoleContext(ctx context.Context) (string, bool) {
	// prevents panic in case of nil context
	if ctx == nil {
		return "", false
	}
	role, ok := ctx.Value(roles.ContextKey).(string)
	if !ok {
		return "", false
	}
	switch role {
	case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName:
		return role, true
	}
	return "", false
}
// userContextKey is an unexported key type so values set by this package
// cannot collide with other packages' context keys.
type userContextKey string

// UserContextKey is the context key for retrieving the user off of context
const UserContextKey = userContextKey("user")
// hasUserContext retrieves the *chronograf.User stored on ctx under
// UserContextKey, reporting false for a nil context, a missing or wrongly
// typed value, or a nil user pointer.
func hasUserContext(ctx context.Context) (*chronograf.User, bool) {
	// prevents panic in case of nil context
	if ctx == nil {
		return nil, false
	}
	if user, ok := ctx.Value(UserContextKey).(*chronograf.User); ok && user != nil {
		return user, true
	}
	return nil, false
}
// hasSuperAdminContext reports whether ctx carries a user (under
// UserContextKey) whose SuperAdmin flag is set.
func hasSuperAdminContext(ctx context.Context) bool {
	user, ok := hasUserContext(ctx)
	return ok && user.SuperAdmin
}
// DataStore is collection of resources that are used by the Service
// Abstracting this into an interface was useful for isolated testing
type DataStore interface {
	Sources(ctx context.Context) chronograf.SourcesStore
	Servers(ctx context.Context) chronograf.ServersStore
	Layouts(ctx context.Context) chronograf.LayoutsStore
	Users(ctx context.Context) chronograf.UsersStore
	Organizations(ctx context.Context) chronograf.OrganizationsStore
	Mappings(ctx context.Context) chronograf.MappingsStore
	Dashboards(ctx context.Context) chronograf.DashboardsStore
	Config(ctx context.Context) chronograf.ConfigStore
	OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore
}

// ensure that Store implements a DataStore
var _ DataStore = &Store{}
// Store implements the DataStore interface. Its accessors scope each
// underlying store by the organization (or server/super-admin status) found
// on the request context.
type Store struct {
	SourcesStore            chronograf.SourcesStore
	ServersStore            chronograf.ServersStore
	LayoutsStore            chronograf.LayoutsStore
	UsersStore              chronograf.UsersStore
	DashboardsStore         chronograf.DashboardsStore
	MappingsStore           chronograf.MappingsStore
	OrganizationsStore      chronograf.OrganizationsStore
	ConfigStore             chronograf.ConfigStore
	OrganizationConfigStore chronograf.OrganizationConfigStore
}
// Sources returns the raw sources store for server contexts, an
// organization-scoped wrapper when an organization is on the context, and a
// no-op store otherwise.
func (s *Store) Sources(ctx context.Context) chronograf.SourcesStore {
	if hasServerContext(ctx) {
		return s.SourcesStore
	}
	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		return &noop.SourcesStore{}
	}
	return organizations.NewSourcesStore(s.SourcesStore, orgID)
}
// Servers returns the raw servers store for server contexts, an
// organization-scoped wrapper when an organization is on the context, and a
// no-op store otherwise.
func (s *Store) Servers(ctx context.Context) chronograf.ServersStore {
	if hasServerContext(ctx) {
		return s.ServersStore
	}
	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		return &noop.ServersStore{}
	}
	return organizations.NewServersStore(s.ServersStore, orgID)
}
// Layouts returns all layouts in the underlying layouts store.
// Layouts are not organization-scoped, so the context is ignored.
func (s *Store) Layouts(ctx context.Context) chronograf.LayoutsStore {
	return s.LayoutsStore
}
// Users returns a chronograf.UsersStore: the raw store for server contexts,
// an organization-scoped wrapper when an organization is on the context, and
// a no-op store otherwise.
func (s *Store) Users(ctx context.Context) chronograf.UsersStore {
	if hasServerContext(ctx) {
		return s.UsersStore
	}
	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		return &noop.UsersStore{}
	}
	return organizations.NewUsersStore(s.UsersStore, orgID)
}
// Dashboards returns the raw dashboards store for server contexts, an
// organization-scoped wrapper when an organization is on the context, and a
// no-op store otherwise.
func (s *Store) Dashboards(ctx context.Context) chronograf.DashboardsStore {
	if hasServerContext(ctx) {
		return s.DashboardsStore
	}
	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		return &noop.DashboardsStore{}
	}
	return organizations.NewDashboardsStore(s.DashboardsStore, orgID)
}
// OrganizationConfig returns an organization-scoped config store when an
// organization is on the context, and a no-op store otherwise. Note there is
// no server-context bypass here, unlike the other accessors.
func (s *Store) OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore {
	orgID, ok := hasOrganizationContext(ctx)
	if !ok {
		return &noop.OrganizationConfigStore{}
	}
	return organizations.NewOrganizationConfigStore(s.OrganizationConfigStore, orgID)
}
// Organizations returns the raw organizations store for server and
// super-admin contexts, an organization-scoped wrapper when an organization
// is on the context, and a no-op store otherwise.
func (s *Store) Organizations(ctx context.Context) chronograf.OrganizationsStore {
	if hasServerContext(ctx) || hasSuperAdminContext(ctx) {
		return s.OrganizationsStore
	}
	if orgID, ok := hasOrganizationContext(ctx); ok {
		return organizations.NewOrganizationsStore(s.OrganizationsStore, orgID)
	}
	return &noop.OrganizationsStore{}
}
// Config returns the raw config store for server and super-admin contexts,
// and a no-op store for everyone else.
func (s *Store) Config(ctx context.Context) chronograf.ConfigStore {
	if hasServerContext(ctx) || hasSuperAdminContext(ctx) {
		return s.ConfigStore
	}
	return &noop.ConfigStore{}
}
// Mappings returns the raw mappings store for server and super-admin
// contexts, and a no-op store for everyone else.
func (s *Store) Mappings(ctx context.Context) chronograf.MappingsStore {
	if hasServerContext(ctx) || hasSuperAdminContext(ctx) {
		return s.MappingsStore
	}
	return &noop.MappingsStore{}
}
// ensure that DirectStore implements a DataStore
var _ DataStore = &DirectStore{}

// DirectStore implements the DataStore interface by returning each
// underlying store directly, with no organization scoping applied.
type DirectStore struct {
	SourcesStore            chronograf.SourcesStore
	ServersStore            chronograf.ServersStore
	LayoutsStore            chronograf.LayoutsStore
	UsersStore              chronograf.UsersStore
	DashboardsStore         chronograf.DashboardsStore
	MappingsStore           chronograf.MappingsStore
	OrganizationsStore      chronograf.OrganizationsStore
	ConfigStore             chronograf.ConfigStore
	OrganizationConfigStore chronograf.OrganizationConfigStore
}
// Sources returns the underlying SourcesStore regardless of context.
func (s *DirectStore) Sources(ctx context.Context) chronograf.SourcesStore {
	return s.SourcesStore
}
// Servers returns the underlying ServersStore regardless of context.
func (s *DirectStore) Servers(ctx context.Context) chronograf.ServersStore {
	return s.ServersStore
}
// Layouts returns the underlying LayoutsStore regardless of context.
func (s *DirectStore) Layouts(ctx context.Context) chronograf.LayoutsStore {
	return s.LayoutsStore
}
// Users returns the underlying UsersStore regardless of context.
func (s *DirectStore) Users(ctx context.Context) chronograf.UsersStore {
	return s.UsersStore
}
// Dashboards returns the underlying DashboardsStore regardless of context.
func (s *DirectStore) Dashboards(ctx context.Context) chronograf.DashboardsStore {
	return s.DashboardsStore
}
// OrganizationConfig returns the underlying OrganizationConfigStore
// regardless of context.
func (s *DirectStore) OrganizationConfig(ctx context.Context) chronograf.OrganizationConfigStore {
	return s.OrganizationConfigStore
}
// Organizations returns the underlying OrganizationsStore regardless of context.
func (s *DirectStore) Organizations(ctx context.Context) chronograf.OrganizationsStore {
	return s.OrganizationsStore
}
// Config returns the underlying ConfigStore regardless of context.
func (s *DirectStore) Config(ctx context.Context) chronograf.ConfigStore {
	return s.ConfigStore
}
// Mappings returns the underlying MappingsStore regardless of context.
func (s *DirectStore) Mappings(ctx context.Context) chronograf.MappingsStore {
	return s.MappingsStore
}

View File

@ -1,428 +0,0 @@
package server
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
)
// TestStore_SourcesGet verifies that Store.Sources applies organization
// scoping from the request context before delegating Get to the underlying
// SourcesStore: a matching organization succeeds, and a missing organization
// yields an error.
func TestStore_SourcesGet(t *testing.T) {
	type fields struct {
		SourcesStore chronograf.SourcesStore
	}
	type args struct {
		organization string
		id           int
	}
	type wants struct {
		source chronograf.Source
		err    bool
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "Get source",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, id int) (chronograf.Source, error) {
						return chronograf.Source{
							ID:           1,
							Name:         "my sweet name",
							Organization: "0",
						}, nil
					},
				},
			},
			args: args{
				organization: "0",
			},
			wants: wants{
				source: chronograf.Source{
					ID:           1,
					Name:         "my sweet name",
					Organization: "0",
				},
			},
		},
		{
			// No organization on the context: the scoped store should refuse.
			name: "Get source - no organization specified on context",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					GetF: func(ctx context.Context, id int) (chronograf.Source, error) {
						return chronograf.Source{
							ID:           1,
							Name:         "my sweet name",
							Organization: "0",
						}, nil
					},
				},
			},
			args: args{},
			wants: wants{
				err: true,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			store := &Store{
				SourcesStore: tt.fields.SourcesStore,
			}
			ctx := context.Background()
			if tt.args.organization != "" {
				ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization)
			}
			source, err := store.Sources(ctx).Get(ctx, tt.args.id)
			if (err != nil) != tt.wants.err {
				t.Errorf("%q. Store.Sources().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(source, tt.wants.source); diff != "" {
				t.Errorf("%q. Store.Sources().Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestStore_SourcesAll verifies that Store.Sources filters the results of
// All down to the organization carried on the request context.
//
// Fix: the failure messages previously said "Store.Sources().Get()" (copied
// from TestStore_SourcesGet) even though this test exercises All(), which
// made test output misleading.
func TestStore_SourcesAll(t *testing.T) {
	type fields struct {
		SourcesStore chronograf.SourcesStore
	}
	type args struct {
		organization string
	}
	type wants struct {
		sources []chronograf.Source
		err     bool
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "Get sources",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					AllF: func(ctx context.Context) ([]chronograf.Source, error) {
						return []chronograf.Source{
							{
								ID:           1,
								Name:         "my sweet name",
								Organization: "0",
							},
						}, nil
					},
				},
			},
			args: args{
				organization: "0",
			},
			wants: wants{
				sources: []chronograf.Source{
					{
						ID:           1,
						Name:         "my sweet name",
						Organization: "0",
					},
				},
			},
		},
		{
			// Sources from other organizations must be filtered out.
			name: "Get sources - multiple orgs",
			fields: fields{
				SourcesStore: &mocks.SourcesStore{
					AllF: func(ctx context.Context) ([]chronograf.Source, error) {
						return []chronograf.Source{
							{
								ID:           1,
								Name:         "my sweet name",
								Organization: "0",
							},
							{
								ID:           2,
								Name:         "A bad source",
								Organization: "0",
							},
							{
								ID:           3,
								Name:         "A good source",
								Organization: "0",
							},
							{
								ID:           4,
								Name:         "a source I can has",
								Organization: "0",
							},
							{
								ID:           5,
								Name:         "i'm in the wrong org",
								Organization: "1",
							},
						}, nil
					},
				},
			},
			args: args{
				organization: "0",
			},
			wants: wants{
				sources: []chronograf.Source{
					{
						ID:           1,
						Name:         "my sweet name",
						Organization: "0",
					},
					{
						ID:           2,
						Name:         "A bad source",
						Organization: "0",
					},
					{
						ID:           3,
						Name:         "A good source",
						Organization: "0",
					},
					{
						ID:           4,
						Name:         "a source I can has",
						Organization: "0",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			store := &Store{
				SourcesStore: tt.fields.SourcesStore,
			}
			ctx := context.Background()
			if tt.args.organization != "" {
				ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization)
			}
			sources, err := store.Sources(ctx).All(ctx)
			if (err != nil) != tt.wants.err {
				t.Errorf("%q. Store.Sources().All() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(sources, tt.wants.sources); diff != "" {
				t.Errorf("%q. Store.Sources().All():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestStore_OrganizationsAdd verifies the access rules applied by
// Store.Organizations: server contexts and super admins see any org,
// while regular users only see the organization on their context.
// NOTE(review): despite the "Add" in the name, this test exercises
// Organizations().Get() — consider renaming if this file is revived.
func TestStore_OrganizationsAdd(t *testing.T) {
	type fields struct {
		OrganizationsStore chronograf.OrganizationsStore
	}
	type args struct {
		orgID         string
		serverContext bool
		organization  string
		user          *chronograf.User
	}
	type wants struct {
		organization *chronograf.Organization
		err          bool
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "Get organization with server context",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "21",
							Name:        "my sweet name",
							DefaultRole: "viewer",
						}, nil
					},
				},
			},
			args: args{
				serverContext: true,
				orgID:         "21",
			},
			wants: wants{
				organization: &chronograf.Organization{
					ID:          "21",
					Name:        "my sweet name",
					DefaultRole: "viewer",
				},
			},
		},
		{
			name: "Get organization with super admin",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "21",
							Name:        "my sweet name",
							DefaultRole: "viewer",
						}, nil
					},
				},
			},
			args: args{
				user: &chronograf.User{
					ID:         1337,
					Name:       "bobbetta",
					Provider:   "github",
					Scheme:     "oauth2",
					SuperAdmin: true,
				},
				orgID: "21",
			},
			wants: wants{
				organization: &chronograf.Organization{
					ID:          "21",
					Name:        "my sweet name",
					DefaultRole: "viewer",
				},
			},
		},
		{
			// Regular user with no organization on context: access denied.
			name: "Get organization not as super admin no organization",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "21",
							Name:        "my sweet name",
							DefaultRole: "viewer",
						}, nil
					},
				},
			},
			args: args{
				user: &chronograf.User{
					ID:       1337,
					Name:     "bobbetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				orgID: "21",
			},
			wants: wants{
				err: true,
			},
		},
		{
			// Regular user asking for their own organization: allowed.
			name: "Get organization not as super admin with organization",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "22",
							Name:        "my sweet name",
							DefaultRole: "viewer",
						}, nil
					},
				},
			},
			args: args{
				user: &chronograf.User{
					ID:       1337,
					Name:     "bobbetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				organization: "22",
				orgID:        "22",
			},
			wants: wants{
				organization: &chronograf.Organization{
					ID:          "22",
					Name:        "my sweet name",
					DefaultRole: "viewer",
				},
			},
		},
		{
			// Regular user asking for a different organization: denied.
			name: "Get different organization not as super admin with organization",
			fields: fields{
				OrganizationsStore: &mocks.OrganizationsStore{
					GetF: func(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
						return &chronograf.Organization{
							ID:          "22",
							Name:        "my sweet name",
							DefaultRole: "viewer",
						}, nil
					},
				},
			},
			args: args{
				user: &chronograf.User{
					ID:       1337,
					Name:     "bobbetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				organization: "21",
				orgID:        "21",
			},
			wants: wants{
				err: true,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			store := &Store{
				OrganizationsStore: tt.fields.OrganizationsStore,
			}
			ctx := context.Background()
			if tt.args.serverContext {
				ctx = serverContext(ctx)
			}
			if tt.args.organization != "" {
				ctx = context.WithValue(ctx, organizations.ContextKey, tt.args.organization)
			}
			if tt.args.user != nil {
				ctx = context.WithValue(ctx, UserContextKey, tt.args.user)
			}
			organization, err := store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &tt.args.orgID})
			if (err != nil) != tt.wants.err {
				t.Errorf("%q. Store.Organizations().Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(organization, tt.wants.organization); diff != "" {
				t.Errorf("%q. Store.Organizations().Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}

View File

@ -1,20 +0,0 @@
package server
//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o swagger_gen.go -tags assets -ignore go -nocompress -pkg server .
import "net/http"
// Spec serves the swagger.json file from bindata.
// The Asset lookup error (if any) is captured once at handler-construction
// time and reported as a 500 on every request rather than panicking.
func Spec() http.HandlerFunc {
	swagger, err := Asset("swagger.json")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(swagger)
	})
}

File diff suppressed because it is too large Load Diff

View File

@ -1,640 +0,0 @@
openapi: "3.0.0"
info:
title: Chronograf
version: 1.5.0.0
servers:
- url: /chronograf/v2
paths:
/cells:
post:
tags:
- Cells
summary: Create a cell
requestBody:
description: cell to create
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Cell"
responses:
'201':
description: Added cell
content:
application/json:
schema:
$ref: "#/components/schemas/Cell"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
get:
tags:
- Cells
summary: Get all cells
responses:
'200':
description: all cells
content:
application/json:
schema:
$ref: "#/components/schemas/Cells"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/cells/{cellID}':
get:
tags:
- Cells
summary: Get a single Cell
parameters:
- in: path
name: cellID
schema:
type: string
required: true
description: ID of cell to update
responses:
'200':
description: get a single cell
content:
application/json:
schema:
$ref: "#/components/schemas/Cell"
'404':
description: cell not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
patch:
tags:
- Cells
summary: Update a single cell
requestBody:
description: patching of a cell
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Cell"
parameters:
- in: path
name: cellID
schema:
type: string
required: true
description: ID of cell to update
responses:
'200':
description: Updated cell
content:
application/json:
schema:
$ref: "#/components/schemas/Cell"
'404':
description: cell not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
delete:
tags:
- Cells
summary: Delete a cell
parameters:
- in: path
name: cellID
schema:
type: string
required: true
description: ID of cell to update
responses:
'204':
description: delete has been accepted
'404':
description: cell not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/dashboards:
post:
tags:
- Dashboards
summary: Create a dashboard
requestBody:
description: dashboard to create
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboard"
responses:
'201':
description: Added dashboard
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboard"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
get:
tags:
- Dashboards
summary: Get all dashboards
responses:
'200':
description: all dashboards
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboards"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
'/dashboards/{dashboardID}':
get:
tags:
- Dashboards
summary: Get a single Dashboard
parameters:
- in: path
name: dashboardID
schema:
type: string
required: true
description: ID of dashboard to update
responses:
'200':
description: get a single dashboard
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboard"
'404':
description: dashboard not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
patch:
tags:
- Dashboards
summary: Update a single dashboard
requestBody:
description: patching of a dashboard
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboard"
parameters:
- in: path
name: dashboardID
schema:
type: string
required: true
description: ID of dashboard to update
responses:
'200':
description: Updated dashboard
content:
application/json:
schema:
$ref: "#/components/schemas/Dashboard"
'404':
description: dashboard not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
delete:
tags:
- Dashboards
summary: Delete a dashboard
parameters:
- in: path
name: dashboardID
schema:
type: string
required: true
description: ID of dashboard to update
responses:
'204':
description: delete has been accepted
'404':
description: dashboard not found
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: unexpected error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
components:
schemas:
Link:
type: object
readOnly: true
description: URI of resource.
properties:
href:
type: string
format: url
required: [href]
Links:
type: object
readOnly: true
properties:
self:
$ref: "#/components/schemas/Link"
required: [self]
Field:
type: object
properties:
value:
description: >-
value is the value of the field. Meaning of the value is implied by
the `type` key
type: string
type:
description: >-
type describes the field type. func is a function; field is a field
reference
type: string
enum:
- func
- field
- integer
- number
- regex
- wildcard
alias:
description: >-
Alias overrides the field name in the returned response. Applies only
if type is `func`
type: string
args:
description: Args are the arguments to the function
type: array
items:
$ref: '#/components/schemas/Field'
QueryConfig:
type: object
required:
- database
- measurement
- retentionPolicy
- areTagsAccepted
- tags
- groupBy
- fields
properties:
id:
type: string
database:
type: string
measurement:
type: string
retentionPolicy:
type: string
areTagsAccepted:
type: boolean
rawText:
type: string
tags:
type: object
groupBy:
type: object
properties:
time:
type: string
tags:
type: array
items:
type: string
required:
- time
- tags
fields:
type: array
items:
$ref: '#/components/schemas/Field'
range:
type: object
properties:
lower:
type: string
upper:
type: string
required:
- lower
- upper
DashboardQuery:
type: object
required:
- query
properties:
label:
type: string
description: Optional Y-axis user-facing label
range:
description: Optional default range of the Y-axis
type: object
required:
- upper
- lower
properties:
upper:
description: Upper bound of the display range of the Y-axis
type: integer
format: int64
lower:
description: Lower bound of the display range of the Y-axis
type: integer
format: int64
query:
type: string
source:
type: string
format: url
description: Optional URI for data source for this query
queryConfig:
$ref: '#/components/schemas/QueryConfig'
name:
type: string
description: An optional word or phrase that refers to the query
Axis:
type: object
description: A description of a particular axis for a visualization
properties:
bounds:
type: array
minItems: 0
maxItems: 2
description: >-
The extents of an axis in the form [lower, upper]. Clients determine
whether bounds are to be inclusive or exclusive of their limits
items:
type: integer
format: int64
label:
description: label is a description of this Axis
type: string
prefix:
description: Prefix represents a label prefix for formatting axis values.
type: string
suffix:
description: Suffix represents a label suffix for formatting axis values.
type: string
base:
description: Base represents the radix for formatting axis values.
type: string
scale:
description: 'Scale is the axis formatting scale. Supported: "log", "linear"'
type: string
DashboardColor:
type: object
description: Color defines an encoding of data value into color space
properties:
id:
description: ID is the unique id of the cell color
type: string
type:
description: Type is how the color is used.
type: string
enum:
- min
- max
- threshold
hex:
description: Hex is the hex number of the color
type: string
maxLength: 7
minLength: 7
name:
description: Name is the user-facing name of the hex color
type: string
value:
description: Value is the data value mapped to this color
type: number
format: float
RenamableField:
description: Describes a field that can be renamed and made visible or invisible
type: object
properties:
internalName:
description: This is the calculated name of a field
readOnly: true
type: string
displayName:
description: This is the name that a field is renamed to by the user
type: string
visible:
description: Indicates whether this field should be visible on the table
type: boolean
V1Visualization:
properties:
type:
type: string
enum: ["chronograf-v1"]
queries:
type: array
items:
$ref: "#/components/schemas/DashboardQuery"
axes:
description: The viewport for a Cell's visualizations
type: object
properties:
x:
$ref: '#/components/schemas/Axis'
y:
$ref: '#/components/schemas/Axis'
y2:
$ref: '#/components/schemas/Axis'
graphType:
description: The viewport for a cell's graph/visualization
type: string
enum:
- single-stat
- line
- line-plus-single-stat
- line-stacked
- line-stepplot
- bar
- gauge
- table
default: line
colors:
description: Colors define color encoding of data into a visualization
type: array
items:
$ref: "#/components/schemas/DashboardColor"
tableOptions:
properties:
verticalTimeAxis:
description: >-
verticalTimeAxis describes the orientation of the table by
indicating whether the time axis will be displayed vertically
type: boolean
sortBy:
$ref: "#/components/schemas/RenamableField"
wrapping:
description: wrapping describes the text wrapping style to be used in table cells
type: string
enum:
- truncate
- wrap
- single-line
fixFirstColumn:
description: >-
fixFirstColumn indicates whether the first column of the table
should be locked
type: boolean
fieldOptions:
description: >-
fieldOptions represent the fields retrieved by the query with
customization options
type: array
items:
$ref: '#/components/schemas/RenamableField'
timeFormat:
description: >-
timeFormat describes the display format for time values according to
moment.js date formatting
type: string
decimalPoints:
description: >-
decimal points indicates whether and how many digits to show after
decimal point
type: object
properties:
isEnforced:
description: Indicates whether decimal point setting should be enforced
type: boolean
digits:
description: The number of digits after decimal to display
type: integer
EmptyVisualization:
properties:
type:
type: string
enum: ["empty"]
Cell:
properties:
links:
$ref: "#/components/schemas/Links"
id:
readOnly: true
type: string
name:
type: string
visualization:
oneOf:
- $ref: "#/components/schemas/V1Visualization"
- $ref: "#/components/schemas/EmptyVisualization"
Cells:
type: object
properties:
links:
$ref: "#/components/schemas/Links"
cells:
type: array
items:
$ref: "#/components/schemas/Cell"
DashboardCell:
type: object
properties:
x:
type: integer
format: int32
y:
type: integer
format: int32
w:
type: integer
format: int32
h:
type: integer
format: int32
ref:
type: string
description: The reference to a cell from the cells API
Dashboard:
properties:
links:
$ref: "#/components/schemas/Links"
id:
readOnly: true
type: string
name:
type: string
cells:
type: array
items:
$ref: "#/components/schemas/DashboardCell"
Dashboards:
type: object
properties:
links:
$ref: "#/components/schemas/Links"
dashboards:
type: array
items:
$ref: "#/components/schemas/Dashboard"
Error:
properties:
code:
readOnly: true
type: integer
format: int32
message:
readOnly: true
type: string
required: [code, message]

View File

@ -1,252 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
idgen "github.com/influxdata/influxdb/v2/chronograf/id"
)
// ValidTemplateRequest checks if the request sent to the server is the
// correct format: the template type, every variable value type, the presence
// of a key for map templates, and the presence of a query for influxql ones.
func ValidTemplateRequest(template *chronograf.Template) error {
	switch template.Type {
	case "constant", "csv", "fieldKeys", "tagKeys", "tagValues", "measurements", "databases", "map", "influxql", "text":
		// recognized template type
	default:
		return fmt.Errorf("unknown template type %s", template.Type)
	}
	for _, value := range template.Values {
		switch value.Type {
		case "csv", "map", "fieldKey", "tagKey", "tagValue", "measurement", "database", "constant", "influxql":
			// recognized variable type
		default:
			return fmt.Errorf("unknown template variable type %s", value.Type)
		}
		if template.Type == "map" && value.Key == "" {
			return fmt.Errorf("templates of type 'map' require a 'key'")
		}
	}
	if template.Type == "influxql" && template.Query == nil {
		return fmt.Errorf("no query set for template of type 'influxql'")
	}
	return nil
}
// templateLinks holds the HATEOAS links embedded in a template response.
type templateLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// templateResponse is the JSON body for a single dashboard template,
// combining the template itself with its resource links.
type templateResponse struct {
	chronograf.Template
	Links templateLinks `json:"links"`
}
// newTemplateResponses builds a response body for each template belonging to
// the dashboard identified by dID, preserving the input order.
func newTemplateResponses(dID chronograf.DashboardID, tmps []chronograf.Template) []templateResponse {
	responses := make([]templateResponse, 0, len(tmps))
	for _, tmp := range tmps {
		responses = append(responses, newTemplateResponse(dID, tmp))
	}
	return responses
}
// templatesResponses is the JSON body listing all templates of a dashboard.
type templatesResponses struct {
	Templates []templateResponse `json:"templates"`
}
// newTemplateResponse wraps a single template with its self link, which is
// rooted under the v1 dashboards API.
func newTemplateResponse(dID chronograf.DashboardID, tmp chronograf.Template) templateResponse {
	const base = "/chronograf/v1/dashboards"
	return templateResponse{
		Template: tmp,
		Links: templateLinks{
			Self: fmt.Sprintf("%s/%d/templates/%s", base, dID, tmp.ID),
		},
	}
}
// Templates returns all templates from a dashboard within the store.
// Responds 422 on a bad dashboard id and 404 if the dashboard is missing.
func (s *Service) Templates(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	d, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	res := templatesResponses{
		Templates: newTemplateResponses(chronograf.DashboardID(id), d.Templates),
	}
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// NewTemplate adds a template to an existing dashboard.
// It validates the request body, assigns the template a fresh UUID, appends
// it to the dashboard, and persists the updated dashboard.
func (s *Service) NewTemplate(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	var template chronograf.Template
	if err := json.NewDecoder(r.Body).Decode(&template); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := ValidTemplateRequest(&template); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	// Server-generated ID; any ID supplied by the client is ignored.
	ids := idgen.UUID{}
	tid, err := ids.Generate()
	if err != nil {
		msg := fmt.Sprintf("Error creating template ID for dashboard %d: %v", id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	template.ID = chronograf.TemplateID(tid)
	dash.Templates = append(dash.Templates, template)
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error adding template %s to dashboard %d: %v", tid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	res := newTemplateResponse(dash.ID, template)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// TemplateID retrieves a specific template from a dashboard by scanning the
// dashboard's template list for a matching template id.
// NOTE(review): when the template is not found, the 404 is reported with the
// dashboard id, not the template id — confirm before relying on the message.
func (s *Service) TemplateID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	tid := httprouter.GetParamFromContext(ctx, "tid")
	for _, t := range dash.Templates {
		if t.ID == chronograf.TemplateID(tid) {
			res := newTemplateResponse(chronograf.DashboardID(id), t)
			encodeJSON(w, http.StatusOK, res, s.Logger)
			return
		}
	}
	notFound(w, id, s.Logger)
}
// RemoveTemplate removes a specific template from an existing dashboard and
// persists the shortened template list. Responds 204 on success.
func (s *Service) RemoveTemplate(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	// Locate the template to delete; -1 means not present.
	tid := httprouter.GetParamFromContext(ctx, "tid")
	pos := -1
	for i, t := range dash.Templates {
		if t.ID == chronograf.TemplateID(tid) {
			pos = i
			break
		}
	}
	if pos == -1 {
		notFound(w, id, s.Logger)
		return
	}
	dash.Templates = append(dash.Templates[:pos], dash.Templates[pos+1:]...)
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error removing template %s from dashboard %d: %v", tid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// ReplaceTemplate replaces a template entirely within an existing dashboard.
// The template id from the URL is kept; any id in the request body is
// overwritten before the dashboard is persisted.
func (s *Service) ReplaceTemplate(w http.ResponseWriter, r *http.Request) {
	id, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), s.Logger)
		return
	}
	ctx := r.Context()
	dash, err := s.Store.Dashboards(ctx).Get(ctx, chronograf.DashboardID(id))
	if err != nil {
		notFound(w, id, s.Logger)
		return
	}
	// Locate the template to replace; -1 means not present.
	tid := httprouter.GetParamFromContext(ctx, "tid")
	pos := -1
	for i, t := range dash.Templates {
		if t.ID == chronograf.TemplateID(tid) {
			pos = i
			break
		}
	}
	if pos == -1 {
		notFound(w, id, s.Logger)
		return
	}
	var template chronograf.Template
	if err := json.NewDecoder(r.Body).Decode(&template); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := ValidTemplateRequest(&template); err != nil {
		invalidData(w, err, s.Logger)
		return
	}
	template.ID = chronograf.TemplateID(tid)
	dash.Templates[pos] = template
	if err := s.Store.Dashboards(ctx).Update(ctx, dash); err != nil {
		msg := fmt.Sprintf("Error updating template %s in dashboard %d: %v", tid, id, err)
		Error(w, http.StatusInternalServerError, msg, s.Logger)
		return
	}
	res := newTemplateResponse(chronograf.DashboardID(id), template)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}

View File

@ -1,101 +0,0 @@
package server
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestValidTemplateRequest covers ValidTemplateRequest's validation rules:
// known template/variable types, the required key for map templates, and the
// required query for influxql templates.
func TestValidTemplateRequest(t *testing.T) {
	tests := []struct {
		name     string
		template *chronograf.Template
		wantErr  bool
	}{
		{
			name: "Valid Template",
			template: &chronograf.Template{
				Type: "fieldKeys",
				TemplateVar: chronograf.TemplateVar{
					Values: []chronograf.TemplateValue{
						{
							Type: "fieldKey",
						},
					},
				},
			},
		},
		{
			name:    "Invalid Template Type",
			wantErr: true,
			template: &chronograf.Template{
				Type: "Unknown Type",
				TemplateVar: chronograf.TemplateVar{
					Values: []chronograf.TemplateValue{
						{
							Type: "fieldKey",
						},
					},
				},
			},
		},
		{
			name:    "Invalid Template Variable Type",
			wantErr: true,
			template: &chronograf.Template{
				Type: "csv",
				TemplateVar: chronograf.TemplateVar{
					Values: []chronograf.TemplateValue{
						{
							Type: "unknown value",
						},
					},
				},
			},
		},
		{
			name:    "No query set",
			wantErr: true,
			template: &chronograf.Template{
				Type: "influxql",
			},
		},
		{
			name: "Valid Map type",
			template: &chronograf.Template{
				Type: "map",
				TemplateVar: chronograf.TemplateVar{
					Values: []chronograf.TemplateValue{
						{
							Key:   "key",
							Value: "value",
							Type:  "map",
						},
					},
				},
			},
		},
		{
			name:    "Map without Key",
			wantErr: true,
			template: &chronograf.Template{
				Type: "map",
				TemplateVar: chronograf.TemplateVar{
					Values: []chronograf.TemplateValue{
						{
							Value: "value",
							Type:  "map",
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ValidTemplateRequest(tt.template); (err != nil) != tt.wantErr {
				t.Errorf("ValidTemplateRequest() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

View File

@ -1,20 +0,0 @@
package server
import (
"encoding/json"
"github.com/google/go-cmp/cmp"
)
// jsonEqual reports whether two JSON-encoded strings decode to the same
// value, ignoring formatting differences. A decode failure on either input
// is returned as the error.
func jsonEqual(s1, s2 string) (eq bool, err error) {
	var first, second interface{}
	if err = json.Unmarshal([]byte(s1), &first); err != nil {
		return false, err
	}
	if err = json.Unmarshal([]byte(s2), &second); err != nil {
		return false, err
	}
	return cmp.Equal(first, second), nil
}

View File

@ -1,192 +0,0 @@
package server
import (
"bufio"
"bytes"
"io"
"net/http"
"regexp"
"github.com/influxdata/influxdb/v2/chronograf"
)
const (
	// ErrNotFlusher is logged when the ResponseWriter does not support
	// http.Flusher, which URLPrefixer needs for chunked output.
	ErrNotFlusher = "Expected http.ResponseWriter to be an http.Flusher, but wasn't"
)
// URLPrefixer is a wrapper for an http.Handler that will prefix all
// occurrences of a relative URL with the configured Prefix.
type URLPrefixer struct {
	Prefix string            // the prefix to be appended after any detected Attrs
	Next   http.Handler      // the http.Handler which will generate the content to be modified by this handler
	Attrs  [][]byte          // a list of attrs that should have their URLs prefixed. For example `src="` or `href="` would be valid
	Logger chronograf.Logger // The logger where prefixing errors will be dispatched to
}
// wrapResponseWriter diverts the wrapped handler's body into Substitute
// (a pipe read by URLPrefixer) while buffering header changes in dupHeader
// until the first WriteHeader call.
type wrapResponseWriter struct {
	http.ResponseWriter
	Substitute *io.PipeWriter

	headerWritten bool
	dupHeader     *http.Header
}
// Write diverts the body into the substitute pipe instead of the real
// ResponseWriter so the prefixer can rewrite it before it is sent.
func (wrw *wrapResponseWriter) Write(p []byte) (int, error) {
	return wrw.Substitute.Write(p)
}
// WriteHeader copies the buffered headers (minus Content-Length, which is
// invalid once the body is rewritten) onto the real ResponseWriter exactly
// once, then forwards the status code.
//
// Fix: the original called wrw.dupHeader.Get("Content-Type") before checking
// dupHeader for nil, panicking whenever WriteHeader ran without a prior
// Header() call. The separate Content-Type copy was also redundant — the
// loop below already copies it along with every other header.
func (wrw *wrapResponseWriter) WriteHeader(code int) {
	if !wrw.headerWritten {
		// Copy buffered headers only if the handler ever asked for them.
		if wrw.dupHeader != nil {
			header := wrw.ResponseWriter.Header()
			// Filter out content length header to prevent stopping writing
			for k, v := range *wrw.dupHeader {
				if k == "Content-Length" {
					continue
				}
				header[k] = v
			}
		}
		wrw.headerWritten = true
	}
	wrw.ResponseWriter.WriteHeader(code)
}
// Header returns a lazily-created copy of the underlying ResponseWriter's
// header map, so callers cannot mutate the real headers directly; the copy
// is flushed to the real writer by WriteHeader.
func (wrw *wrapResponseWriter) Header() http.Header {
	if wrw.dupHeader == nil {
		dup := http.Header{}
		for key, values := range wrw.ResponseWriter.Header() {
			dup[key] = values
		}
		wrw.dupHeader = &dup
	}
	return *wrw.dupHeader
}
// ChunkSize is the number of bytes per chunked transfer-encoding.
// ServeHTTP flushes the response to the client each time this many bytes
// have been written since the previous flush.
const ChunkSize int = 512
// ServeHTTP implements an http.Handler that prefixes relative URLs from the
// Next handler with the configured prefix. It does this by examining the
// stream through the ResponseWriter, and appending the Prefix after any of the
// Attrs detected in the stream.
func (up *URLPrefixer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	// extract the flusher for flushing chunks
	flusher, ok := rw.(http.Flusher)
	if !ok {
		// Without a Flusher, chunked streaming is impossible: log and pass
		// the response through untouched rather than failing the request.
		up.Logger.Info(ErrNotFlusher)
		up.Next.ServeHTTP(rw, r)
		return
	}

	// SVG assets are served verbatim. The regexp.Match error is discarded;
	// the pattern is a constant, so a compile failure cannot occur at runtime.
	isSVG, _ := regexp.Match(".svg$", []byte(r.URL.String()))
	if isSVG {
		up.Next.ServeHTTP(rw, r)
		return
	}

	// chunked transfer because we're modifying the response on the fly, so we
	// won't know the final content-length
	rw.Header().Set("Connection", "Keep-Alive")
	rw.Header().Set("Transfer-Encoding", "chunked")

	writtenCount := 0 // number of bytes written to rw
	// Run Next concurrently, capturing its output through a pipe so this
	// goroutine can rewrite the stream as it is produced.
	nextRead, nextWrite := io.Pipe()
	go func() {
		defer nextWrite.Close()
		up.Next.ServeHTTP(&wrapResponseWriter{ResponseWriter: rw, Substitute: nextWrite}, r)
	}()

	// setup a buffer which is the max length of our target attrs
	b := make([]byte, up.maxlen(up.Attrs...))
	// prime the buffer with the start of the input.
	// NOTE(review): the ReadFull error is ignored; a response shorter than
	// the window leaves trailing zero bytes in b — confirm intended.
	io.ReadFull(nextRead, b)
	buf := bytes.NewBuffer(b)

	// Read next handler's response byte by byte
	src := bufio.NewScanner(nextRead)
	src.Split(bufio.ScanBytes)
	for {
		window := buf.Bytes()

		// advance a byte if window is not a src attr
		if matchlen, match := up.match(window, up.Attrs...); matchlen == 0 {
			if src.Scan() {
				// shift the next byte into buf
				rw.Write(buf.Next(1))
				writtenCount++
				buf.Write(src.Bytes())

				// flush to the client every ChunkSize bytes so it sees progress
				if writtenCount >= ChunkSize {
					flusher.Flush()
					writtenCount = 0
				}
			} else {
				// input exhausted (or scan error): emit the remaining window and stop
				if err := src.Err(); err != nil {
					up.Logger.
						WithField("component", "prefixer").
						Error("Error encountered while scanning: err:", err)
				}
				rw.Write(window)
				flusher.Flush()
				break
			}
			continue
		} else {
			// the window begins with a matched attr: consume it, re-emit it,
			// then inject the prefix ahead of the URL that follows
			buf.Next(matchlen) // advance to the relative URL
			for i := 0; i < matchlen; i++ {
				src.Scan()
				buf.Write(src.Bytes())
			}
			rw.Write(match)               // add the src attr to the output
			io.WriteString(rw, up.Prefix) // write the prefix
		}
	}
}
// match compares the subject against a list of targets. If there is a match
// between any of them a non-zero value is returned. The returned value is the
// length of the match. The matching []byte is also returned as the second
// return parameter.
//
// Fix: the original documented the assumption that subject is longer than
// every target and sliced subject[:len(target)] unconditionally, which
// panics with an out-of-range slice when the window is shorter (e.g. a
// response smaller than the longest attr). Over-long targets are now
// skipped instead.
func (up *URLPrefixer) match(subject []byte, targets ...[]byte) (int, []byte) {
	for _, target := range targets {
		if len(subject) < len(target) {
			continue // target cannot possibly match a shorter subject
		}
		if bytes.Equal(subject[:len(target)], target) {
			return len(target), target
		}
	}
	return 0, []byte{}
}
// maxlen returns the length of the longest []byte among its arguments;
// zero when called with none.
func (up *URLPrefixer) maxlen(targets ...[]byte) int {
	longest := 0
	for _, t := range targets {
		if len(t) > longest {
			longest = len(t)
		}
	}
	return longest
}
// NewDefaultURLPrefixer returns a URLPrefixer configured with the attribute
// markers used by the Chronograf UI: src and href attributes in HTML, url(
// directives in CSS, and data-basepath attributes that forward the prefix
// to front-end logic. next is the handler whose output will be prefixed.
func NewDefaultURLPrefixer(prefix string, next http.Handler, lg chronograf.Logger) *URLPrefixer {
	attrs := [][]byte{
		[]byte(`src="`),
		[]byte(`href="`),
		[]byte(`url(`),
		[]byte(`data-basepath="`), // for forwarding basepath to frontend
	}
	return &URLPrefixer{
		Prefix: prefix,
		Next:   next,
		Logger: lg,
		Attrs:  attrs,
	}
}

View File

@ -1,178 +0,0 @@
package server_test
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
"github.com/influxdata/influxdb/v2/chronograf/server"
)
// prefixerTests drives Test_Server_Prefixer_RewritesURLs: each case feeds
// subject through a URLPrefixer configured with attrs and expects the
// output rewritten with the "/arbitraryprefix" prefix.
// NOTE(review): shouldErr is declared but never read by the test body.
var prefixerTests = []struct {
	name      string   // case label used in failure messages
	subject   string   // raw backend output fed to the prefixer
	expected  string   // output expected after prefix rewriting
	shouldErr bool     // currently unused
	attrs     [][]byte // attribute markers whose URLs get prefixed
}{
	{
		`One script tag`,
		`<script type="text/javascript" src="/loljavascript.min.js">`,
		`<script type="text/javascript" src="/arbitraryprefix/loljavascript.min.js">`,
		false,
		[][]byte{
			[]byte(`src="`),
		},
	},
	{
		`Two script tags`,
		`<script type="text/javascript" src="/loljavascript.min.js"><script type="text/javascript" src="/anotherscript.min.js">`,
		`<script type="text/javascript" src="/arbitraryprefix/loljavascript.min.js"><script type="text/javascript" src="/arbitraryprefix/anotherscript.min.js">`,
		false,
		[][]byte{
			[]byte(`src="`),
		},
	},
	{
		`Link href`,
		`<link rel="shortcut icon" href="/favicon.ico">`,
		`<link rel="shortcut icon" href="/arbitraryprefix/favicon.ico">`,
		false,
		[][]byte{
			[]byte(`src="`),
			[]byte(`href="`),
		},
	},
	{
		`Trailing HTML`,
		`<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8"/>
<title>Chronograf</title>
<link rel="shortcut icon" href="/favicon.ico"><link href="/chronograf.css" rel="stylesheet"></head>
<body>
<div id='react-root'></div>
<script type="text/javascript" src="/manifest.7489452b099f9581ca1b.dev.js"></script><script type="text/javascript" src="/vendor.568c0101d870a13ecff9.dev.js"></script><script type="text/javascript" src="/app.13d0ce0b33609be3802b.dev.js"></script></body>
</html>`,
		`<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8"/>
<title>Chronograf</title>
<link rel="shortcut icon" href="/arbitraryprefix/favicon.ico"><link href="/arbitraryprefix/chronograf.css" rel="stylesheet"></head>
<body>
<div id='react-root'></div>
<script type="text/javascript" src="/arbitraryprefix/manifest.7489452b099f9581ca1b.dev.js"></script><script type="text/javascript" src="/arbitraryprefix/vendor.568c0101d870a13ecff9.dev.js"></script><script type="text/javascript" src="/arbitraryprefix/app.13d0ce0b33609be3802b.dev.js"></script></body>
</html>`,
		false,
		[][]byte{
			[]byte(`src="`),
			[]byte(`href="`),
		},
	},
}
// Test_Server_Prefixer_RewritesURLs verifies that URLPrefixer rewrites every
// configured attribute's URL in the backend's output for each prefixerTests case.
//
// Fixes relative to the original: the test server and response body are now
// closed inside each iteration (the loop-scoped `defer ts.Close()` previously
// accumulated servers until the whole test returned, and the body was never
// closed), and a failed GET skips the read instead of reading a nil response.
func Test_Server_Prefixer_RewritesURLs(t *testing.T) {
	t.Parallel()

	for _, test := range prefixerTests {
		subject := test.subject
		expected := test.expected

		backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, subject)
		})

		pfx := &server.URLPrefixer{Prefix: "/arbitraryprefix", Next: backend, Attrs: test.attrs}

		ts := httptest.NewServer(pfx)
		res, err := http.Get(ts.URL)
		if err != nil {
			ts.Close()
			t.Error("Unexpected error fetching from prefixer: err:", err)
			continue
		}

		actual, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		ts.Close()
		if err != nil {
			t.Error("Unable to read prefixed body: err:", err)
			continue
		}

		if string(actual) != expected+"\n" {
			t.Error(test.name, ":\n Unsuccessful prefixing.\n\tWant:", fmt.Sprintf("%+q", expected), "\n\tGot: ", fmt.Sprintf("%+q", string(actual)))
		}
	}
}
// clogger is an http.ResponseWriter that is not an http.Flusher. It is used
// for testing the behavior of handlers that may rely on specific behavior of
// http.Flusher
type clogger struct {
next http.ResponseWriter
}
func (c *clogger) Header() http.Header {
return c.next.Header()
}
func (c *clogger) Write(bytes []byte) (int, error) {
return c.next.Write(bytes)
}
func (c *clogger) WriteHeader(code int) {
c.next.WriteHeader(code)
}
// Test_Server_Prefixer_NoPrefixingWithoutFlusther checks that when the
// ResponseWriter is not an http.Flusher the prefixer passes the response
// through unmodified and logs ErrNotFlusher.
// (The "Flusther" typo in the name is kept so external tooling that filters
// by test name keeps working.)
//
// Fix relative to the original: the HTTP response body is now closed.
func Test_Server_Prefixer_NoPrefixingWithoutFlusther(t *testing.T) {
	backend := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(rw, "<a href=\"/valley\">Hill Valley Preservation Society</a>")
	})

	// wrapFunc hides the recorder's http.Flusher implementation behind
	// clogger so the prefixer sees a non-Flusher writer.
	wrapFunc := func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
			clog := &clogger{rw}
			next.ServeHTTP(clog, r)
		})
	}

	tl := &mocks.TestLogger{}
	pfx := &server.URLPrefixer{
		Prefix: "/hill",
		Next:   backend,
		Logger: tl,
		Attrs: [][]byte{
			[]byte("href=\""),
		},
	}

	ts := httptest.NewServer(wrapFunc(pfx))
	defer ts.Close()

	res, err := http.Get(ts.URL)
	if err != nil {
		t.Fatal("Unexpected error fetching from prefixer: err:", err)
	}
	defer res.Body.Close() // fix: previously leaked

	actual, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal("Unable to read prefixed body: err:", err)
	}

	unexpected := "<a href=\"/hill/valley\">Hill Valley Preservation Society</a>"
	expected := "<a href=\"/valley\">Hill Valley Preservation Society</a>"
	if string(actual) == unexpected {
		t.Error("No Flusher", ":\n Prefixing occurred without an http.Flusher")
	}
	if string(actual) != expected {
		t.Error("No Flusher", ":\n\tPrefixing failed to output without an http.Flusher\n\t\tWant:\n", expected, "\n\t\tGot:\n", string(actual))
	}
	if !tl.HasMessage("info", server.ErrNotFlusher) {
		t.Error("No Flusher", ":\n Expected Error Message: \"", server.ErrNotFlusher, "\" but saw none. Msgs:", tl.Messages)
	}
}

View File

@ -1,379 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"sort"
"strconv"
"github.com/bouk/httprouter"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// userRequest is the JSON payload accepted by the user create and update
// endpoints. ID is serialized as a string on the wire (json ",string").
type userRequest struct {
	ID         uint64            `json:"id,string"`
	Name       string            `json:"name"`     // OAuth-able email address or username
	Provider   string            `json:"provider"` // auth provider, e.g. github or google
	Scheme     string            `json:"scheme"`   // forced to "oauth2" by ValidCreate
	SuperAdmin bool              `json:"superAdmin"`
	Roles      []chronograf.Role `json:"roles"`
}
// ValidCreate checks that a create request carries a name, provider, and
// scheme, then validates the role list. As a side effect it pins Scheme to
// "oauth2", the only authentication scheme currently supported; remove the
// pin when additional schemes are added.
func (r *userRequest) ValidCreate() error {
	switch {
	case r.Name == "":
		return fmt.Errorf("name required on Chronograf User request body")
	case r.Provider == "":
		return fmt.Errorf("provider required on Chronograf User request body")
	case r.Scheme == "":
		return fmt.Errorf("scheme required on Chronograf User request body")
	}

	r.Scheme = "oauth2"
	return r.ValidRoles()
}
// ValidUpdate checks that an update request carries a role list and that
// the roles themselves are valid; a nil Roles slice means there is nothing
// to update and is rejected.
func (r *userRequest) ValidUpdate() error {
	if r.Roles != nil {
		return r.ValidRoles()
	}
	return fmt.Errorf("no Roles to update")
}
// ValidRoles verifies every role names an organization, that no two roles
// target the same organization, and that each role name is one of the
// recognized roles (member, viewer, editor, admin, or the wildcard '*').
// An empty role list is valid.
func (r *userRequest) ValidRoles() error {
	if len(r.Roles) == 0 {
		return nil
	}
	seen := map[string]bool{}
	for _, role := range r.Roles {
		if role.Organization == "" {
			return fmt.Errorf("no organization was provided")
		}
		if seen[role.Organization] {
			return fmt.Errorf("duplicate organization %q in roles", role.Organization)
		}
		seen[role.Organization] = true
		switch role.Name {
		case roles.MemberRoleName, roles.ViewerRoleName, roles.EditorRoleName, roles.AdminRoleName, roles.WildcardRoleName:
			// recognized role; nothing to do
		default:
			return fmt.Errorf("unknown role %s. Valid roles are 'member', 'viewer', 'editor', 'admin', and '*'", role.Name)
		}
	}
	return nil
}
// userResponse is the JSON shape of a single user, including a self link
// (scoped to an organization when one was supplied to newUserResponse).
type userResponse struct {
	Links      selfLinks         `json:"links"`
	ID         uint64            `json:"id,string"` // serialized as a string on the wire
	Name       string            `json:"name"`
	Provider   string            `json:"provider"`
	Scheme     string            `json:"scheme"`
	SuperAdmin bool              `json:"superAdmin"`
	Roles      []chronograf.Role `json:"roles"` // never null: normalized to [] by newUserResponse
}
// newUserResponse shapes a chronograf.User for JSON output. When org is
// non-empty the self link is scoped under that organization's user route.
func newUserResponse(u *chronograf.User, org string) *userResponse {
	// Normalize nil roles to an empty slice so the marshaled JSON contains
	// [] rather than null, sparing JavaScript a guard before iterating.
	if u.Roles == nil {
		u.Roles = []chronograf.Role{}
	}

	selfLink := fmt.Sprintf("/chronograf/v1/users/%d", u.ID)
	if org != "" {
		selfLink = fmt.Sprintf("/chronograf/v1/organizations/%s/users/%d", org, u.ID)
	}

	return &userResponse{
		ID:         u.ID,
		Name:       u.Name,
		Provider:   u.Provider,
		Scheme:     u.Scheme,
		Roles:      u.Roles,
		SuperAdmin: u.SuperAdmin,
		Links:      selfLinks{Self: selfLink},
	}
}
// usersResponse is the JSON shape of a user collection plus its self link.
// Users are sorted by ascending ID (see newUsersResponse).
type usersResponse struct {
	Links selfLinks       `json:"links"`
	Users []*userResponse `json:"users"`
}
// newUsersResponse shapes a collection of users for JSON output, sorted by
// ascending ID, with a self link scoped to org when one is given.
func newUsersResponse(users []chronograf.User, org string) *usersResponse {
	resp := make([]*userResponse, 0, len(users))
	for _, u := range users {
		u := u // take the address of a copy, leaving the input slice untouched
		resp = append(resp, newUserResponse(&u, org))
	}
	sort.Slice(resp, func(a, b int) bool {
		return resp[a].ID < resp[b].ID
	})

	selfLink := "/chronograf/v1/users"
	if org != "" {
		selfLink = fmt.Sprintf("/chronograf/v1/organizations/%s/users", org)
	}

	return &usersResponse{
		Users: resp,
		Links: selfLinks{Self: selfLink},
	}
}
// UserID retrieves a single Chronograf user by the :id path parameter and
// writes it back, with links scoped to the :oid organization when present.
func (s *Service) UserID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	id, err := strconv.ParseUint(httprouter.GetParamFromContext(ctx, "id"), 10, 64)
	if err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger)
		return
	}

	user, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	res := newUserResponse(user, httprouter.GetParamFromContext(ctx, "oid"))
	location(w, res.Links.Self)
	encodeJSON(w, http.StatusOK, res, s.Logger)
}
// NewUser adds a new Chronograf user to store. The body must pass
// userRequest.ValidCreate, every requested role must reference an existing
// organization, and granting SuperAdmin is subject to setSuperAdmin's
// authorization check. Responds 201 with the created user on success.
func (s *Service) NewUser(w http.ResponseWriter, r *http.Request) {
	var req userRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	ctx := r.Context()
	// Config and role lookups run under the server context so they are not
	// limited by the requesting user's own permissions.
	serverCtx := serverContext(ctx)
	cfg, err := s.Store.Config(serverCtx).Get(serverCtx)
	if err != nil {
		Error(w, http.StatusInternalServerError, err.Error(), s.Logger)
		return
	}
	if err := s.validRoles(serverCtx, req.Roles); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	user := &chronograf.User{
		Name:     req.Name,
		Provider: req.Provider,
		Scheme:   req.Scheme,
		Roles:    req.Roles,
	}

	// When the instance is configured to make every new user a SuperAdmin,
	// force the requested flag on before the authorization check below.
	if cfg.Auth.SuperAdminNewUsers {
		req.SuperAdmin = true
	}

	if err := setSuperAdmin(ctx, req, user); err != nil {
		Error(w, http.StatusUnauthorized, err.Error(), s.Logger)
		return
	}

	res, err := s.Store.Users(ctx).Add(ctx, user)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	orgID := httprouter.GetParamFromContext(ctx, "oid")
	cu := newUserResponse(res, orgID)
	location(w, cu.Links.Self)
	encodeJSON(w, http.StatusCreated, cu, s.Logger)
}
// RemoveUser deletes the Chronograf user identified by the :id path
// parameter, answering 204 No Content on success.
func (s *Service) RemoveUser(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	id, err := strconv.ParseUint(httprouter.GetParamFromContext(ctx, "id"), 10, 64)
	if err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger)
		return
	}

	// Fetch first so a missing user yields 404 rather than a delete failure.
	u, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusNotFound, err.Error(), s.Logger)
		return
	}
	if err := s.Store.Users(ctx).Delete(ctx, u); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
// UpdateUser updates a Chronograf user in store. Only the role list (and,
// subject to authorization, the SuperAdmin flag) may change; name, provider,
// and scheme are immutable through this endpoint and any differing values in
// the request are rejected.
func (s *Service) UpdateUser(w http.ResponseWriter, r *http.Request) {
	var req userRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, s.Logger)
		return
	}

	ctx := r.Context()
	idStr := httprouter.GetParamFromContext(ctx, "id")
	id, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		Error(w, http.StatusBadRequest, fmt.Sprintf("invalid user id: %s", err.Error()), s.Logger)
		return
	}

	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	u, err := s.Store.Users(ctx).Get(ctx, chronograf.UserQuery{ID: &id})
	if err != nil {
		Error(w, http.StatusNotFound, err.Error(), s.Logger)
		return
	}

	// Role validation runs under the server context so organization lookups
	// are not restricted by the caller's own permissions.
	serverCtx := serverContext(ctx)
	if err := s.validRoles(serverCtx, req.Roles); err != nil {
		invalidData(w, err, s.Logger)
		return
	}

	// ValidUpdate should ensure that req.Roles is not nil
	u.Roles = req.Roles

	// If the request contains a name, it must be the same as the
	// one on the user. This is particularly useful to the front-end
	// because they would like to provide the whole user object,
	// including the name, provider, and scheme in update requests.
	// But currently, it is not possible to change name, provider, or
	// scheme via the API.
	if req.Name != "" && req.Name != u.Name {
		err := fmt.Errorf("cannot update Name")
		invalidData(w, err, s.Logger)
		return
	}
	if req.Provider != "" && req.Provider != u.Provider {
		err := fmt.Errorf("cannot update Provider")
		invalidData(w, err, s.Logger)
		return
	}
	if req.Scheme != "" && req.Scheme != u.Scheme {
		err := fmt.Errorf("cannot update Scheme")
		invalidData(w, err, s.Logger)
		return
	}

	// Don't allow SuperAdmins to modify their own SuperAdmin status.
	// Allowing them to do so could result in an application where there
	// are no super admins.
	ctxUser, ok := hasUserContext(ctx)
	if !ok {
		Error(w, http.StatusInternalServerError, "failed to retrieve user from context", s.Logger)
		return
	}
	// If the user being updated is the user making the request and they are
	// changing their SuperAdmin status, return an unauthorized error
	if ctxUser.ID == u.ID && u.SuperAdmin && !req.SuperAdmin {
		Error(w, http.StatusUnauthorized, "user cannot modify their own SuperAdmin status", s.Logger)
		return
	}
	if err := setSuperAdmin(ctx, req, u); err != nil {
		Error(w, http.StatusUnauthorized, err.Error(), s.Logger)
		return
	}

	err = s.Store.Users(ctx).Update(ctx, u)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	orgID := httprouter.GetParamFromContext(ctx, "oid")
	cu := newUserResponse(u, orgID)
	location(w, cu.Links.Self)
	encodeJSON(w, http.StatusOK, cu, s.Logger)
}
// Users retrieves every Chronograf user from store and writes them back
// sorted by ID, with links scoped to the :oid organization when present.
func (s *Service) Users(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	all, err := s.Store.Users(ctx).All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), s.Logger)
		return
	}

	org := httprouter.GetParamFromContext(ctx, "oid")
	encodeJSON(w, http.StatusOK, newUsersResponse(all, org), s.Logger)
}
// setSuperAdmin applies the requested SuperAdmin flag to user.
//
// A caller holding the SuperAdmin context may set the flag freely. Any
// other caller may not change it: a request that would alter the current
// status is rejected with an authorization error, while a request that
// leaves it unchanged passes.
// TODO(desa): Refactor this https://github.com/influxdata/influxdb/chronograf/issues/2207
func setSuperAdmin(ctx context.Context, req userRequest, user *chronograf.User) error {
	if hasSuperAdminContext(ctx) {
		user.SuperAdmin = req.SuperAdmin
		return nil
	}
	if user.SuperAdmin != req.SuperAdmin {
		// The request tries to flip SuperAdmin without the SuperAdmin context.
		return fmt.Errorf("user does not have authorization required to set SuperAdmin status. See https://github.com/influxdata/influxdb/chronograf/issues/2601 for more information")
	}
	return nil
}
// validRoles verifies that every role in rs references an existing
// organization, and resolves any wildcard role name to that organization's
// default role, writing the resolution back into rs in place.
func (s *Service) validRoles(ctx context.Context, rs []chronograf.Role) error {
	for i := range rs {
		role := rs[i]
		org, err := s.Store.Organizations(ctx).Get(ctx, chronograf.OrganizationQuery{ID: &role.Organization})
		if err != nil {
			return err
		}
		if role.Name != roles.WildcardRoleName {
			continue
		}
		role.Name = org.DefaultRole
		rs[i] = role
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,14 +0,0 @@
package server
import (
"net/http"
)
// Version handler adds X-Chronograf-Version header to responses
func Version(version string, h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Chronograf-Version", version)
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}

5
go.mod
View File

@ -12,7 +12,6 @@ require (
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3
github.com/benbjohnson/tmpl v1.0.0
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e
github.com/cespare/xxhash v1.1.0
github.com/davecgh/go-spew v1.1.1
@ -48,8 +47,6 @@ require (
github.com/influxdata/influx-cli/v2 v2.0.0-20210713195937-a69f06b41b45
github.com/influxdata/influxql v0.0.0-20180925231337-1cbfca8e56b6
github.com/influxdata/pkg-config v0.2.7
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368
github.com/jessevdk/go-flags v1.4.0
github.com/jmoiron/sqlx v1.3.4
github.com/jsternberg/zap-logfmt v1.2.0
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef
@ -82,7 +79,6 @@ require (
github.com/stretchr/testify v1.7.0
github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72
github.com/tinylib/msgp v1.1.0
github.com/tylerb/graceful v1.2.15
github.com/uber/jaeger-client-go v2.28.0+incompatible
github.com/willf/bitset v1.1.9 // indirect
github.com/xlab/treeprint v1.0.0
@ -93,7 +89,6 @@ require (
go.uber.org/multierr v1.5.0
go.uber.org/zap v1.14.1
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/net v0.0.0-20210119194325-5f4716e94777
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c

8
go.sum
View File

@ -111,8 +111,6 @@ github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM=
github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5 h1:kS0dw4K730x7cxT+bVyTyYJZHuSoH7ofSr/Ijit56Qw=
github.com/bouk/httprouter v0.0.0-20160817010721-ee8b3818a7f5/go.mod h1:CDReaxg1cmLrtcasZy43l4EYPAknXLiQSrb7tLw5zXM=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e/go.mod h1:errmMKH8tTB49UR2A8C8DPYkyudelsYJwJFaZHQ6ik8=
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
@ -358,10 +356,6 @@ github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPIn
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4=
github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w=
@ -589,8 +583,6 @@ github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72/g
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tylerb/graceful v1.2.15 h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83oA=
github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg=
github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ=