chore: delete as many files from chronograf/ as we can to still build (#21893)

pull/21900/head
Daniel Moran 2021-07-20 12:33:22 -04:00 committed by GitHub
parent 7ea348db35
commit 4d76491746
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
157 changed files with 0 additions and 34524 deletions

View File

@ -1,14 +0,0 @@
[bumpversion]
current_version = 1.5.0.0
files = README.md server/swagger.json server/swagger_v2.yml
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)\.(?P<release>\d+)
serialize = {major}.{minor}.{patch}.{release}
[bumpversion:part:release]
[bumpversion:file:ui/package.json]
search = "version": "{current_version}"
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)-(?P<release>\d+)
serialize = {major}.{minor}.{patch}-{release}
replace = "version": "{new_version}"

View File

@ -1,67 +0,0 @@
package kapacitor
import (
"bytes"
"encoding/json"
"regexp"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/pipeline/tick"
)
// AlertServices generates alert chaining methods to be attached to an alert from all rule Services
func AlertServices(rule chronograf.AlertRule) (string, error) {
	rendered, err := addAlertNodes(rule.AlertNodes)
	if err != nil {
		return "", err
	}
	err = ValidateAlert(rendered)
	if err != nil {
		return "", err
	}
	return rendered, nil
}
// addAlertNodes renders the given alert handler configuration as a
// TICKscript fragment in chronograf's older alert schema.
func addAlertNodes(handlers chronograf.AlertNodes) (string, error) {
	// Round-trip through JSON: the handler configuration is applied to a
	// fresh pipeline AlertNode by unmarshalling onto it.
	octets, err := json.Marshal(&handlers)
	if err != nil {
		return "", err
	}
	// Build a minimal stream -> from -> alert pipeline to host the node.
	stream := &pipeline.StreamNode{}
	pipe := pipeline.CreatePipelineSources(stream)
	from := stream.From()
	node := from.Alert()
	if err = json.Unmarshal(octets, node); err != nil {
		return "", err
	}
	// Render the pipeline back to TICKscript text via the tick AST printer.
	aster := tick.AST{}
	err = aster.Build(pipe)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	aster.Program.Format(&buf, "", false)
	rawTick := buf.String()
	// Strip the stream/from/alert scaffolding so only the chained alert
	// handler properties remain.
	return toOldSchema(rawTick), nil
}
// These patterns strip properties that the tick AST printer emits but that
// chronograf's older alert schema manages through template variables.
var (
	removeID      = regexp.MustCompile(`(?m)\s*\.id\(.*\)$`)      // Remove to use ID variable
	removeMessage = regexp.MustCompile(`(?m)\s*\.message\(.*\)$`) // Remove to use message variable
	removeDetails = regexp.MustCompile(`(?m)\s*\.details\(.*\)$`) // Remove to use details variable
	removeHistory = regexp.MustCompile(`(?m)\s*\.history\(21\)$`) // Remove default history
)

// toOldSchema strips the stream/from/alert scaffolding and the generated
// id/message/details/history properties so the rendered TICKscript matches
// the older Chronograf alert schema.
func toOldSchema(rawTick string) string {
	// strings.ReplaceAll replaces the strings.Replace(..., -1) idiom.
	rawTick = strings.ReplaceAll(rawTick, "stream\n |from()\n |alert()", "")
	rawTick = removeID.ReplaceAllString(rawTick, "")
	rawTick = removeMessage.ReplaceAllString(rawTick, "")
	rawTick = removeDetails.ReplaceAllString(rawTick, "")
	rawTick = removeHistory.ReplaceAllString(rawTick, "")
	return rawTick
}

View File

@ -1,228 +0,0 @@
package kapacitor
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestAlertServices verifies TICKscript generation for each supported alert
// handler service. Cases run as t.Run subtests so failures are attributable
// to a single case, matching the style of Test_addAlertNodes in this file.
func TestAlertServices(t *testing.T) {
	tests := []struct {
		name    string
		rule    chronograf.AlertRule
		want    chronograf.TICKScript
		wantErr bool
	}{
		{
			name: "Test several valid services",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Slack:     []*chronograf.Slack{{}},
					VictorOps: []*chronograf.VictorOps{{}},
					Email:     []*chronograf.Email{{}},
				},
			},
			want: `alert()
.email()
.victorOps()
.slack()
`,
		},
		{
			name: "Test single valid service",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Slack: []*chronograf.Slack{{}},
				},
			},
			want: `alert()
.slack()
`,
		},
		{
			name: "Test pushoverservice",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Pushover: []*chronograf.Pushover{
						{
							Device:   "asdf",
							Title:    "asdf",
							Sound:    "asdf",
							URL:      "http://moo.org",
							URLTitle: "influxdata",
						},
					},
				},
			},
			want: `alert()
.pushover()
.device('asdf')
.title('asdf')
.uRL('http://moo.org')
.uRLTitle('influxdata')
.sound('asdf')
`,
		},
		{
			name: "Test single valid service and property",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Slack: []*chronograf.Slack{
						{
							Channel: "#general",
						},
					},
				},
			},
			want: `alert()
.slack()
.channel('#general')
`,
		},
		{
			name: "Test tcp",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					TCPs: []*chronograf.TCP{
						{
							Address: "myaddress:22",
						},
					},
				},
			},
			want: `alert()
.tcp('myaddress:22')
`,
		},
		{
			name: "Test log",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Log: []*chronograf.Log{
						{
							FilePath: "/tmp/alerts.log",
						},
					},
				},
			},
			want: `alert()
.log('/tmp/alerts.log')
`,
		},
		{
			name: "Test http as post",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Posts: []*chronograf.Post{
						{
							URL: "http://myaddress",
						},
					},
				},
			},
			want: `alert()
.post('http://myaddress')
`,
		},
		{
			name: "Test post with headers",
			rule: chronograf.AlertRule{
				AlertNodes: chronograf.AlertNodes{
					Posts: []*chronograf.Post{
						{
							URL:     "http://myaddress",
							Headers: map[string]string{"key": "value"},
						},
					},
				},
			},
			want: `alert()
.post('http://myaddress')
.header('key', 'value')
`,
		},
	}
	for _, tt := range tests {
		tt := tt // capture for the subtest closure (pre-Go 1.22 loop semantics)
		t.Run(tt.name, func(t *testing.T) {
			got, err := AlertServices(tt.rule)
			if (err != nil) != tt.wantErr {
				t.Errorf("%q. AlertServices() error = %v, wantErr %v", tt.name, err, tt.wantErr)
				return
			}
			if tt.wantErr {
				return
			}
			// Generated scripts omit the alert() head; re-add it so the
			// formatter can parse the fragment.
			formatted, err := formatTick("alert()" + got)
			if err != nil {
				t.Errorf("%q. formatTick() error = %v", tt.name, err)
				return
			}
			if formatted != tt.want {
				t.Errorf("%q. AlertServices() = %v, want %v", tt.name, formatted, tt.want)
			}
		})
	}
}
// Test_addAlertNodes verifies that alert handler configurations render to
// the expected TICKscript property chains in the old chronograf schema.
func Test_addAlertNodes(t *testing.T) {
	tests := []struct {
		name     string
		handlers chronograf.AlertNodes
		want     string
		wantErr  bool
	}{
		{
			name: "test email alerts",
			handlers: chronograf.AlertNodes{
				IsStateChangesOnly: true,
				Email: []*chronograf.Email{
					{
						To: []string{
							"me@me.com", "you@you.com",
						},
					},
				},
			},
			// Each recipient becomes its own .to(...) property.
			want: `
.stateChangesOnly()
.email()
.to('me@me.com')
.to('you@you.com')
`,
		},
		{
			name: "test pushover alerts",
			handlers: chronograf.AlertNodes{
				IsStateChangesOnly: true,
				Pushover: []*chronograf.Pushover{
					{
						Device:   "asdf",
						Title:    "asdf",
						Sound:    "asdf",
						URL:      "http://moo.org",
						URLTitle: "influxdata",
					},
				},
			},
			want: `
.stateChangesOnly()
.pushover()
.device('asdf')
.title('asdf')
.uRL('http://moo.org')
.uRLTitle('influxdata')
.sound('asdf')
`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := addAlertNodes(tt.handlers)
			if (err != nil) != tt.wantErr {
				t.Errorf("addAlertNodes() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("addAlertNodes() =\n%v\n, want\n%v", got, tt.want)
			}
		})
	}
}

View File

@ -1,502 +0,0 @@
package kapacitor
import (
"encoding/json"
"regexp"
"strconv"
"strings"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
// varString returns the string value of the named TICKscript var. The
// second return is false when the var is missing or not a string.
func varString(kapaVar string, vars map[string]tick.Var) (string, bool) {
	// The redundant `var ok bool` pre-declaration was removed; := below
	// declares everything this function needs.
	v, ok := vars[kapaVar]
	if !ok {
		return "", false
	}
	strVar, ok := v.Value.(string)
	return strVar, ok
}
// varValue renders the named TICKscript var as a string regardless of its
// underlying type. The second return is false for missing vars and
// unsupported types.
func varValue(kapaVar string, vars map[string]tick.Var) (string, bool) {
	v, ok := vars[kapaVar]
	if !ok {
		return "", false
	}
	switch val := v.Value.(type) {
	case string:
		return val, true
	case float64:
		// bitSize 64: the value IS a float64; the previous bitSize of 32
		// rounded it as if it were a float32, silently losing precision.
		return strconv.FormatFloat(val, 'f', -1, 64), true
	case int64:
		return strconv.FormatInt(val, 10), true
	case bool:
		return strconv.FormatBool(val), true
	case time.Time:
		return val.String(), true
	case *regexp.Regexp:
		return val.String(), true
	default:
		return "", false
	}
}
// varDuration returns the named TICKscript var rendered as a duration
// string (time.Duration.String form). The second return is false when the
// var is missing or not a duration.
func varDuration(kapaVar string, vars map[string]tick.Var) (string, bool) {
	// Redundant `var ok bool` pre-declaration removed.
	v, ok := vars[kapaVar]
	if !ok {
		return "", false
	}
	durVar, ok := v.Value.(time.Duration)
	if !ok {
		return "", false
	}
	return durVar.String(), true
}
// varStringList returns the named TICKscript var as a slice of strings.
// The second return is false when the var is missing, is not a list, or
// contains any non-string element.
func varStringList(kapaVar string, vars map[string]tick.Var) ([]string, bool) {
	v, found := vars[kapaVar]
	if !found {
		return nil, false
	}
	items, isList := v.Value.([]tick.Var)
	if !isList {
		return nil, false
	}
	out := make([]string, len(items))
	for i := range items {
		s, isStr := items[i].Value.(string)
		if !isStr {
			return nil, false
		}
		out[i] = s
	}
	return out, true
}
// WhereFilter filters the stream data in a TICKScript. It is the parsed
// form of the script's whereFilter lambda variable.
type WhereFilter struct {
	TagValues map[string][]string // Tags are filtered by an array of values
	Operator  string              // Operator is == or !=
}
// varWhereFilter reconstructs a WhereFilter from the script's whereFilter
// lambda variable. The second return is false when the lambda cannot be
// mapped back onto a queryConfig-style tag filter.
func varWhereFilter(vars map[string]tick.Var) (WhereFilter, bool) {
	// All chronograf TICKScripts have whereFilters.
	v, ok := vars["whereFilter"]
	if !ok {
		return WhereFilter{}, ok
	}
	filter := WhereFilter{}
	filter.TagValues = make(map[string][]string)
	// All chronograf TICKScript's whereFilter use a lambda function.
	value, ok := v.Value.(*ast.LambdaNode)
	if !ok {
		return WhereFilter{}, ok
	}
	lambda := value.ExpressionString()
	// Chronograf TICKScripts use lambda: TRUE as a pass-through where clause
	// if the script does not have a where clause set.
	if lambda == "TRUE" {
		return WhereFilter{}, true
	}
	opSet := map[string]struct{}{} // All ops must be the same b/c queryConfig
	// Otherwise the lambda function will be several "tag" op 'value' expressions.
	// (?U) makes the captures non-greedy so adjacent expressions don't merge.
	var re = regexp.MustCompile(`(?U)"(.*)"\s+(==|!=)\s+'(.*)'`)
	for _, match := range re.FindAllStringSubmatch(lambda, -1) {
		tag, op, value := match[1], match[2], match[3]
		opSet[op] = struct{}{}
		values, ok := filter.TagValues[tag]
		if !ok {
			values = make([]string, 0)
		}
		values = append(values, value)
		filter.TagValues[tag] = values
	}
	// An obscure piece of the queryConfig is that the operator in ALL binary
	// expressions must be the same. So, there must only be one operator
	// in our opSet.
	if len(opSet) != 1 {
		return WhereFilter{}, false
	}
	for op := range opSet {
		if op != "==" && op != "!=" {
			return WhereFilter{}, false
		}
		filter.Operator = op
	}
	return filter, true
}
// CommonVars includes all the variables of a chronograf TICKScript.
type CommonVars struct {
	DB          string
	RP          string
	Measurement string
	Name        string
	Message     string
	TriggerType string
	GroupBy     []string
	Filter      WhereFilter
	Period      string // duration string; empty when the script declares none
	Every       string // duration string; empty when the script declares none
	Detail      string // alert details body; empty when the script declares none
}

// ThresholdVars represents the critical value where an alert occurs
type ThresholdVars struct {
	Crit string
}

// RangeVars represents the critical range where an alert occurs
type RangeVars struct {
	Lower string
	Upper string
}

// RelativeVars represents the critical range and time in the past an alert occurs
type RelativeVars struct {
	Shift string
	Crit  string
}

// DeadmanVars represents a deadman alert
type DeadmanVars struct{}
// extractCommonVars pulls the set of variables every chronograf-built
// TICKscript defines. It returns ErrNotChronoTickscript when any required
// variable is absent, meaning the script was not produced by the builder.
func extractCommonVars(vars map[string]tick.Var) (CommonVars, error) {
	res := CommonVars{}
	// All of these string variables must exist to be a chronograf TICKscript;
	// a table loop replaces six identical lookup-and-check stanzas.
	required := []struct {
		name string
		dst  *string
	}{
		{"db", &res.DB},
		{"rp", &res.RP},
		{"measurement", &res.Measurement},
		{"name", &res.Name},
		{"message", &res.Message},
		{"triggerType", &res.TriggerType},
	}
	for _, r := range required {
		v, ok := varString(r.name, vars)
		if !ok {
			return CommonVars{}, ErrNotChronoTickscript
		}
		*r.dst = v
	}
	// All chronograf TICKScripts have groupBy. Possible to be empty list though.
	groups, ok := varStringList("groupBy", vars)
	if !ok {
		return CommonVars{}, ErrNotChronoTickscript
	}
	res.GroupBy = groups
	// All chronograf TICKScripts must have a whereFilter. Could be empty.
	res.Filter, ok = varWhereFilter(vars)
	if !ok {
		return CommonVars{}, ErrNotChronoTickscript
	}
	// Some chronograf TICKScripts have details associated with the alert.
	// Typically, this is the body of an email alert.
	if detail, ok := varString("details", vars); ok {
		res.Detail = detail
	}
	// Relative and Threshold alerts may have an every variable.
	if every, ok := varDuration("every", vars); ok {
		res.Every = every
	}
	// All alert types may have a period variable.
	if period, ok := varDuration("period", vars); ok {
		res.Period = period
	}
	return res, nil
}
// extractAlertVars pulls the trigger-specific variables (deadman,
// threshold, threshold-range, or relative) out of the script's vars,
// returning one of the *Vars structs. ErrNotChronoTickscript is returned
// when the trigger type or its required variables are missing.
func extractAlertVars(vars map[string]tick.Var) (interface{}, error) {
	// Depending on the type of the alert the variables set will be different
	alertType, ok := varString("triggerType", vars)
	if !ok {
		return nil, ErrNotChronoTickscript
	}
	switch alertType {
	case Deadman:
		return &DeadmanVars{}, nil
	case Threshold:
		// A plain threshold alert has a single critical value; otherwise
		// this must be a range alert carrying both bounds.
		if crit, ok := varValue("crit", vars); ok {
			return &ThresholdVars{
				Crit: crit,
			}, nil
		}
		r := &RangeVars{}
		// Threshold Range alerts must have both an upper and lower bound
		if r.Lower, ok = varValue("lower", vars); !ok {
			return nil, ErrNotChronoTickscript
		}
		if r.Upper, ok = varValue("upper", vars); !ok {
			return nil, ErrNotChronoTickscript
		}
		return r, nil
	case Relative:
		// Relative alerts must have a time shift and critical value
		r := &RelativeVars{}
		if r.Shift, ok = varDuration("shift", vars); !ok {
			return nil, ErrNotChronoTickscript
		}
		if r.Crit, ok = varValue("crit", vars); !ok {
			return nil, ErrNotChronoTickscript
		}
		return r, nil
	default:
		return nil, ErrNotChronoTickscript
	}
}
// FieldFunc represents the field used as the alert value and its optional aggregate function
type FieldFunc struct {
	Field string
	Func  string
}

// extractFieldFunc reads the value field (and aggregate function, if any)
// out of a generated TICKscript. A zero FieldFunc is returned for scripts
// with no value pipeline, e.g. deadman alerts.
func extractFieldFunc(script chronograf.TICKScript) FieldFunc {
	// If the TICKScript is relative or threshold alert with an aggregate
	// then the aggregate function and field is in the form |func('field').as('value').
	// Only the first match matters, so FindStringSubmatch replaces the
	// previous FindAllStringSubmatch(..., -1) scan that discarded the rest.
	re := regexp.MustCompile(`(?Um)\|(\w+)\('(.*)'\)\s*\.as\('value'\)`)
	if match := re.FindStringSubmatch(string(script)); match != nil {
		return FieldFunc{
			Field: match[2],
			Func:  match[1],
		}
	}
	// If the alert does not have an aggregate then the value function will
	// be this form: |eval(lambda: "%s").as('value')
	re = regexp.MustCompile(`(?Um)\|eval\(lambda: "(.*)"\)\s*\.as\('value'\)`)
	if match := re.FindStringSubmatch(string(script)); match != nil {
		return FieldFunc{
			Field: match[1],
		}
	}
	// Otherwise, this could be a deadman alert and not have a FieldFunc.
	return FieldFunc{}
}
// CritCondition represents the operators that determine when the alert should go critical
type CritCondition struct {
	Operators []string
}

// extractCrit reads the comparison operator(s) out of the script's .crit
// lambda: one operator for threshold/relative alerts, three for threshold
// range alerts, and none for deadman alerts.
func extractCrit(script chronograf.TICKScript) CritCondition {
	// Threshold and relative alerts have the form .crit(lambda: "value" op crit).
	// Only the first match matters, so FindStringSubmatch replaces the
	// previous FindAllStringSubmatch(..., -1) scan that discarded the rest.
	re := regexp.MustCompile(`(?Um)\.crit\(lambda:\s+"value"\s+(.*)\s+crit\)`)
	if match := re.FindStringSubmatch(string(script)); match != nil {
		return CritCondition{
			Operators: []string{
				match[1],
			},
		}
	}
	// Threshold range alerts have the form
	// .crit(lambda: "value" op lower op "value" op upper).
	re = regexp.MustCompile(`(?Um)\.crit\(lambda:\s+"value"\s+(.*)\s+lower\s+(.*)\s+"value"\s+(.*)\s+upper\)`)
	if match := re.FindStringSubmatch(string(script)); match != nil {
		return CritCondition{
			Operators: []string{
				match[1],
				match[2],
				match[3],
			},
		}
	}
	// It's possible to not have a critical condition if this is
	// a deadman alert.
	return CritCondition{}
}
// alertType reads the TICKscript and returns the specific
// alerting type. If it is unable to determine it will
// return ErrNotChronoTickscript
func alertType(script chronograf.TICKScript) (string, error) {
	t := string(script)
	switch {
	case strings.Contains(t, `var triggerType = 'threshold'`):
		switch {
		case strings.Contains(t, `var crit = `):
			return Threshold, nil
		case strings.Contains(t, `var lower = `) && strings.Contains(t, `var upper = `):
			return ThresholdRange, nil
		}
		return "", ErrNotChronoTickscript
	case strings.Contains(t, `var triggerType = 'relative'`):
		switch {
		case strings.Contains(t, `eval(lambda: float("current.value" - "past.value"))`):
			return ChangeAmount, nil
		case strings.Contains(t, `|eval(lambda: abs(float("current.value" - "past.value")) / float("past.value") * 100.0)`):
			return ChangePercent, nil
		}
		return "", ErrNotChronoTickscript
	case strings.Contains(t, `var triggerType = 'deadman'`):
		return Deadman, nil
	}
	return "", ErrNotChronoTickscript
}
// Reverse converts tickscript to an AlertRule. Scripts that were not built
// by the chronograf builder yield ErrNotChronoTickscript.
func Reverse(script chronograf.TICKScript) (chronograf.AlertRule, error) {
	rule := chronograf.AlertRule{
		Query: &chronograf.QueryConfig{},
	}
	t, err := alertType(script)
	if err != nil {
		return rule, err
	}
	// Parse the script as a template to recover its declared variables.
	scope := stateful.NewScope()
	template, err := pipeline.CreateTemplatePipeline(string(script), pipeline.StreamEdge, scope, &deadman{})
	if err != nil {
		return chronograf.AlertRule{}, err
	}
	vars := template.Vars()
	commonVars, err := extractCommonVars(vars)
	if err != nil {
		return rule, err
	}
	alertVars, err := extractAlertVars(vars)
	if err != nil {
		return rule, err
	}
	fieldFunc := extractFieldFunc(script)
	critCond := extractCrit(script)
	// The number of operators in the critical condition must agree with
	// the alert type.
	switch t {
	case Threshold, ChangeAmount, ChangePercent:
		if len(critCond.Operators) != 1 {
			return rule, ErrNotChronoTickscript
		}
	case ThresholdRange:
		if len(critCond.Operators) != 3 {
			return rule, ErrNotChronoTickscript
		}
	}
	rule.Name = commonVars.Name
	rule.Trigger = commonVars.TriggerType
	rule.Message = commonVars.Message
	rule.Details = commonVars.Detail
	rule.Query.Database = commonVars.DB
	rule.Query.RetentionPolicy = commonVars.RP
	rule.Query.Measurement = commonVars.Measurement
	rule.Query.GroupBy.Tags = commonVars.GroupBy
	if commonVars.Filter.Operator == "==" {
		rule.Query.AreTagsAccepted = true
	}
	rule.Query.Tags = commonVars.Filter.TagValues
	if t == Deadman {
		rule.TriggerValues.Period = commonVars.Period
	} else {
		rule.Query.GroupBy.Time = commonVars.Period
		rule.Every = commonVars.Every
		if fieldFunc.Func != "" {
			rule.Query.Fields = []chronograf.Field{
				{
					Type:  "func",
					Value: fieldFunc.Func,
					Args: []chronograf.Field{
						{
							Value: fieldFunc.Field,
							Type:  "field",
						},
					},
				},
			}
		} else {
			rule.Query.Fields = []chronograf.Field{
				{
					Type:  "field",
					Value: fieldFunc.Field,
				},
			}
		}
	}
	switch t {
	case ChangeAmount, ChangePercent:
		rule.TriggerValues.Change = t
		rule.TriggerValues.Operator, err = chronoOperator(critCond.Operators[0])
		if err != nil {
			return rule, ErrNotChronoTickscript
		}
		v, ok := alertVars.(*RelativeVars)
		if !ok {
			return rule, ErrNotChronoTickscript
		}
		rule.TriggerValues.Value = v.Crit
		rule.TriggerValues.Shift = v.Shift
	case Threshold:
		rule.TriggerValues.Operator, err = chronoOperator(critCond.Operators[0])
		if err != nil {
			return rule, ErrNotChronoTickscript
		}
		v, ok := alertVars.(*ThresholdVars)
		if !ok {
			return rule, ErrNotChronoTickscript
		}
		rule.TriggerValues.Value = v.Crit
	case ThresholdRange:
		rule.TriggerValues.Operator, err = chronoRangeOperators(critCond.Operators)
		// Fix: this error was previously discarded, so an unparsable range
		// operator silently produced a rule with an empty Operator. Treat
		// it like the other cases and reject the script.
		if err != nil {
			return rule, ErrNotChronoTickscript
		}
		v, ok := alertVars.(*RangeVars)
		if !ok {
			return rule, ErrNotChronoTickscript
		}
		rule.TriggerValues.Value = v.Lower
		rule.TriggerValues.RangeValue = v.Upper
	}
	// Re-parse with the variables bound to walk the concrete pipeline and
	// recover the alert handler configuration.
	p, err := pipeline.CreatePipeline(string(script), pipeline.StreamEdge, stateful.NewScope(), &deadman{}, vars)
	if err != nil {
		return chronograf.AlertRule{}, err
	}
	err = extractAlertNodes(p, &rule)
	return rule, err
}
// extractAlertNodes walks the pipeline and copies the alert node's JSON
// representation into the rule's AlertNodes.
func extractAlertNodes(p *pipeline.Pipeline, rule *chronograf.AlertRule) error {
	return p.Walk(func(n pipeline.Node) error {
		alert, isAlert := n.(*pipeline.AlertNode)
		if !isAlert {
			return nil
		}
		octets, err := json.MarshalIndent(alert, "", " ")
		if err != nil {
			return err
		}
		return json.Unmarshal(octets, &rule.AlertNodes)
	})
}

File diff suppressed because it is too large Load Diff

View File

@ -1,415 +0,0 @@
package kapacitor
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/id"
client "github.com/influxdata/kapacitor/client/v1"
)
const (
	// Prefix is prepended to the ID of all alerts
	Prefix = "chronograf-v1-"
	// FetchRate is the rate Paginating Kapacitor Clients will consume responses
	FetchRate = 100
)

// Client communicates to kapacitor
type Client struct {
	URL                string
	Username           string
	Password           string
	InsecureSkipVerify bool
	ID                 chronograf.ID
	Ticker             chronograf.Ticker
	// kapaClient constructs the underlying connection; a field (rather
	// than a direct call to NewKapaClient) so tests can inject a fake.
	kapaClient func(url, username, password string, insecureSkipVerify bool) (KapaClient, error)
}

// KapaClient represents a connection to a kapacitor instance
type KapaClient interface {
	CreateTask(opt client.CreateTaskOptions) (client.Task, error)
	Task(link client.Link, opt *client.TaskOptions) (client.Task, error)
	ListTasks(opt *client.ListTasksOptions) ([]client.Task, error)
	UpdateTask(link client.Link, opt client.UpdateTaskOptions) (client.Task, error)
	DeleteTask(link client.Link) error
}
// NewClient creates a client that interfaces with Kapacitor tasks
func NewClient(url, username, password string, insecureSkipVerify bool) *Client {
	c := &Client{
		URL:                url,
		Username:           username,
		Password:           password,
		InsecureSkipVerify: insecureSkipVerify,
	}
	c.ID = &id.UUID{}
	c.Ticker = &Alert{}
	c.kapaClient = NewKapaClient
	return c
}
// Task represents a running kapacitor task
type Task struct {
	ID         string                // Kapacitor ID
	Href       string                // Kapacitor relative URI
	HrefOutput string                // Kapacitor relative URI to HTTPOutNode
	Rule       chronograf.AlertRule  // Rule is the rule that represents this Task
	TICKScript chronograf.TICKScript // TICKScript is the running script
}

// NewTask creates a task from a kapacitor client task
func NewTask(task *client.Task) *Task {
	dbrps := make([]chronograf.DBRP, len(task.DBRPs))
	for i := range task.DBRPs {
		dbrps[i].DB = task.DBRPs[i].Database
		dbrps[i].RP = task.DBRPs[i].RetentionPolicy
	}
	script := chronograf.TICKScript(task.TICKscript)
	rule, err := Reverse(script)
	if err != nil {
		// Not a chronograf-built script: fall back to a bare rule named by
		// the task ID so the task is still representable.
		rule = chronograf.AlertRule{
			Name:  task.ID,
			Query: nil,
		}
	}
	// Overlay the live task state onto the (possibly reconstructed) rule.
	rule.ID = task.ID
	rule.TICKScript = script
	rule.Type = task.Type.String()
	rule.DBRPs = dbrps
	rule.Status = task.Status.String()
	rule.Executing = task.Executing
	rule.Error = task.Error
	rule.Created = task.Created
	rule.Modified = task.Modified
	rule.LastEnabled = task.LastEnabled
	return &Task{
		ID:         task.ID,
		Href:       task.Link.Href,
		HrefOutput: HrefOutput(task.ID),
		Rule:       rule,
	}
}
// HrefOutput returns the link to a kapacitor task httpOut Node given an id
func HrefOutput(ID string) string {
	return "/kapacitor/v1/tasks/" + ID + "/" + HTTPEndpoint
}

// Href returns the link to a kapacitor task given an id
func (c *Client) Href(ID string) string {
	return "/kapacitor/v1/tasks/" + ID
}

// HrefOutput returns the link to a kapacitor task httpOut Node given an id
func (c *Client) HrefOutput(ID string) string {
	return HrefOutput(ID)
}
// Create builds and POSTs a tickscript to kapacitor
func (c *Client) Create(ctx context.Context, rule chronograf.AlertRule) (*Task, error) {
	var opt *client.CreateTaskOptions
	var err error
	// A rule with a QueryConfig is generated by the builder; otherwise the
	// rule's raw TICKscript is used as-is.
	if rule.Query != nil {
		opt, err = c.createFromQueryConfig(rule)
	} else {
		opt, err = c.createFromTick(rule)
	}
	if err != nil {
		return nil, err
	}
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}
	task, err := kapa.CreateTask(*opt)
	if err != nil {
		return nil, err
	}
	return NewTask(&task), nil
}

// createFromTick builds task-creation options directly from the rule's raw
// TICKscript, DBRPs, status, and type.
func (c *Client) createFromTick(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) {
	dbrps := make([]client.DBRP, len(rule.DBRPs))
	for i := range rule.DBRPs {
		dbrps[i] = client.DBRP{
			Database:        rule.DBRPs[i].DB,
			RetentionPolicy: rule.DBRPs[i].RP,
		}
	}
	// Default to enabled unless the rule carries an explicit status.
	status := client.Enabled
	if rule.Status != "" {
		if err := status.UnmarshalText([]byte(rule.Status)); err != nil {
			return nil, err
		}
	}
	// Default to a stream task; parse any other declared type.
	// NOTE(review): an empty rule.Type also takes this branch and is handed
	// to UnmarshalText("") — confirm that failing on empty Type is intended.
	taskType := client.StreamTask
	if rule.Type != "stream" {
		if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil {
			return nil, err
		}
	}
	return &client.CreateTaskOptions{
		ID:         rule.Name,
		Type:       taskType,
		DBRPs:      dbrps,
		TICKscript: string(rule.TICKScript),
		Status:     status,
	}, nil
}
// createFromQueryConfig builds task-creation options from a builder rule:
// it generates a fresh chronograf-prefixed task ID and the TICKscript.
func (c *Client) createFromQueryConfig(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) {
	id, err := c.ID.Generate()
	if err != nil {
		return nil, err
	}
	script, err := c.Ticker.Generate(rule)
	if err != nil {
		return nil, err
	}
	kapaID := Prefix + id
	return &client.CreateTaskOptions{
		ID:         kapaID,
		Type:       toTask(rule.Query),
		DBRPs:      []client.DBRP{{Database: rule.Query.Database, RetentionPolicy: rule.Query.RetentionPolicy}},
		TICKscript: string(script),
		Status:     client.Enabled,
	}, nil
}
// Delete removes tickscript task from kapacitor
func (c *Client) Delete(ctx context.Context, href string) error {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return err
	}
	return kapa.DeleteTask(client.Link{Href: href})
}

// updateStatus sets the status (enabled/disabled) of the task at href and
// returns the updated task.
func (c *Client) updateStatus(ctx context.Context, href string, status client.TaskStatus) (*Task, error) {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}
	opts := client.UpdateTaskOptions{
		Status: status,
	}
	task, err := kapa.UpdateTask(client.Link{Href: href}, opts)
	if err != nil {
		return nil, err
	}
	return NewTask(&task), nil
}
// Disable changes the tickscript status to disabled for a given href.
func (c *Client) Disable(ctx context.Context, href string) (*Task, error) {
	return c.updateStatus(ctx, href, client.Disabled)
}

// Enable changes the tickscript status to enabled for a given href.
func (c *Client) Enable(ctx context.Context, href string) (*Task, error) {
	return c.updateStatus(ctx, href, client.Enabled)
}
// Status returns the status of a task in kapacitor
func (c *Client) Status(ctx context.Context, href string) (string, error) {
	s, err := c.status(ctx, href)
	if err != nil {
		return "", err
	}
	return s.String(), nil
}

// status fetches the task at href and returns its raw client.TaskStatus.
func (c *Client) status(ctx context.Context, href string) (client.TaskStatus, error) {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return 0, err
	}
	task, err := kapa.Task(client.Link{Href: href}, nil)
	if err != nil {
		return 0, err
	}
	return task.Status, nil
}
// All returns all tasks in kapacitor, keyed by task ID.
func (c *Client) All(ctx context.Context) (map[string]*Task, error) {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}
	// Only get the status, id and link section back
	opts := &client.ListTasksOptions{}
	tasks, err := kapa.ListTasks(opts)
	if err != nil {
		return nil, err
	}
	all := make(map[string]*Task, len(tasks))
	for i := range tasks {
		// Address the slice element instead of a range loop variable:
		// before Go 1.22, &loopVar is one shared address across iterations
		// and is an easy source of aliasing bugs if the pointer escapes.
		task := &tasks[i]
		all[task.ID] = NewTask(task)
	}
	return all, nil
}
// Reverse builds a chronograf.AlertRule and its QueryConfig from a tickscript
func (c *Client) Reverse(id string, script chronograf.TICKScript) chronograf.AlertRule {
	rule, err := Reverse(script)
	if err != nil {
		// Scripts not built by chronograf still get a minimal rule; the
		// nil Query distinguishes them from builder-generated rules.
		return chronograf.AlertRule{
			ID:         id,
			Name:       id,
			Query:      nil,
			TICKScript: script,
		}
	}
	rule.ID = id
	rule.TICKScript = script
	return rule
}

// Get returns a single alert in kapacitor
func (c *Client) Get(ctx context.Context, id string) (*Task, error) {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}
	href := c.Href(id)
	task, err := kapa.Task(client.Link{Href: href}, nil)
	if err != nil {
		// NOTE(review): every lookup failure is reported as "not found",
		// which hides transport/auth errors — confirm this is intended.
		return nil, chronograf.ErrAlertNotFound
	}
	return NewTask(&task), nil
}
// Update changes the tickscript of a given id.
func (c *Client) Update(ctx context.Context, href string, rule chronograf.AlertRule) (*Task, error) {
	kapa, err := c.kapaClient(c.URL, c.Username, c.Password, c.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}
	// Remember the current status: the update options always submit the
	// task disabled, so a previously enabled task must be re-enabled after.
	prevStatus, err := c.status(ctx, href)
	if err != nil {
		return nil, err
	}
	var opt *client.UpdateTaskOptions
	if rule.Query != nil {
		opt, err = c.updateFromQueryConfig(rule)
	} else {
		opt, err = c.updateFromTick(rule)
	}
	if err != nil {
		return nil, err
	}
	task, err := kapa.UpdateTask(client.Link{Href: href}, *opt)
	if err != nil {
		return nil, err
	}
	// Now enable the task if previously enabled
	if prevStatus == client.Enabled {
		if _, err := c.Enable(ctx, href); err != nil {
			return nil, err
		}
	}
	return NewTask(&task), nil
}
// updateFromQueryConfig builds task-update options by regenerating the
// TICKscript from the rule's QueryConfig.
func (c *Client) updateFromQueryConfig(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) {
	script, err := c.Ticker.Generate(rule)
	if err != nil {
		return nil, err
	}
	// We need to disable the kapacitor task followed by enabling it during update.
	return &client.UpdateTaskOptions{
		TICKscript: string(script),
		Status:     client.Disabled,
		Type:       toTask(rule.Query),
		DBRPs: []client.DBRP{
			{
				Database:        rule.Query.Database,
				RetentionPolicy: rule.Query.RetentionPolicy,
			},
		},
	}, nil
}

// updateFromTick builds task-update options from the rule's raw TICKscript
// and DBRPs.
func (c *Client) updateFromTick(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) {
	dbrps := make([]client.DBRP, len(rule.DBRPs))
	for i := range rule.DBRPs {
		dbrps[i] = client.DBRP{
			Database:        rule.DBRPs[i].DB,
			RetentionPolicy: rule.DBRPs[i].RP,
		}
	}
	// Default to a stream task; parse any other declared type.
	taskType := client.StreamTask
	if rule.Type != "stream" {
		if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil {
			return nil, err
		}
	}
	// We need to disable the kapacitor task followed by enabling it during update.
	return &client.UpdateTaskOptions{
		TICKscript: string(rule.TICKScript),
		Status:     client.Disabled,
		Type:       taskType,
		DBRPs:      dbrps,
	}, nil
}
// toTask returns the kapacitor task type for a query config: batch when
// the query carries raw text, stream otherwise.
func toTask(q *chronograf.QueryConfig) client.TaskType {
	if q != nil && q.RawText != nil && *q.RawText != "" {
		return client.BatchTask
	}
	return client.StreamTask
}
// NewKapaClient creates a Kapacitor client connection. The returned client
// paginates ListTasks responses at FetchRate. Credentials are only attached
// when a username is supplied.
func NewKapaClient(url, username, password string, insecureSkipVerify bool) (KapaClient, error) {
	var creds *client.Credentials
	if username != "" {
		creds = &client.Credentials{
			Method:   client.UserAuthentication,
			Username: username,
			Password: password,
		}
	}
	clnt, err := client.New(client.Config{
		URL:                url,
		Credentials:        creds,
		InsecureSkipVerify: insecureSkipVerify,
	})
	if err != nil {
		// Return nil on error instead of the partially constructed client:
		// the value half of a (T, error) pair must not be used when err is
		// non-nil, so handing it back only invites misuse.
		return nil, err
	}
	return &PaginatingKapaClient{clnt, FetchRate}, nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,63 +0,0 @@
package kapacitor
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Data returns the tickscript data section for querying
func Data(rule chronograf.AlertRule) (string, error) {
	// Raw-text queries are wrapped in a batch task.
	if rule.Query.RawText != nil && *rule.Query.RawText != "" {
		batch := `
var data = batch
|query('''
%s
''')
.period(period)
.every(every)
.align()`
		// Fix: dereference RawText. Formatting the *string itself with %s
		// prints the pointer (e.g. %!s(*string=0xc0000…)) into the query.
		// The deref is safe: the nil check above guards this branch.
		batch = fmt.Sprintf(batch, *rule.Query.RawText)
		if rule.Query.GroupBy.Time != "" {
			batch = batch + fmt.Sprintf(".groupBy(%s)", rule.Query.GroupBy.Time)
		}
		return batch, nil
	}
	// Builder queries stream from the configured db/rp/measurement.
	stream := `var data = stream
|from()
.database(db)
.retentionPolicy(rp)
.measurement(measurement)
`
	stream = fmt.Sprintf("%s\n.groupBy(groupBy)\n", stream)
	stream = stream + ".where(whereFilter)\n"
	// Only need aggregate functions for threshold and relative
	if rule.Trigger != "deadman" {
		fld, err := field(rule.Query)
		if err != nil {
			return "", err
		}
		value := ""
		for _, field := range rule.Query.Fields {
			if field.Type == "func" && len(field.Args) > 0 && field.Args[0].Type == "field" {
				// Only need a window if we have an aggregate function
				value = value + "|window().period(period).every(every).align()\n"
				value = value + fmt.Sprintf(`|%s('%s').as('value')`, field.Value, field.Args[0].Value)
				break // only support a single field
			}
			// NOTE(review): once a plain field sets value, the next
			// iteration stops here — only the first field-type entry is
			// used. Confirm this single-field limit is intended.
			if value != "" {
				break // only support a single field
			}
			if field.Type == "field" {
				value = fmt.Sprintf(`|eval(lambda: "%s").as('value')`, field.Value)
			}
		}
		if value == "" {
			value = fmt.Sprintf(`|eval(lambda: "%s").as('value')`, fld)
		}
		stream = stream + value
	}
	return stream, nil
}

View File

@ -1,60 +0,0 @@
package kapacitor
import (
"encoding/json"
"fmt"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// config is a sample QueryConfig JSON payload used by TestData: a
// tag-filtered, grouped cpu query with an aggregate field and no raw text.
var config = `{
"id": "93e17825-2fb0-4507-87bd-a0c136947f7e",
"database": "telegraf",
"measurement": "cpu",
"retentionPolicy": "default",
"fields": [{
"field": "usage_user",
"funcs": ["mean"]
}],
"tags": {
"host": [
"acc-0eabc309-eu-west-1-data-3",
"prod"
],
"cpu": [
"cpu_total"
]
},
"groupBy": {
"time": null,
"tags": [
"host",
"cluster_id"
]
},
"areTagsAccepted": true,
"rawText": null
}`
// TestData checks that the data section generated for a deadman rule is a
// formattable TICKscript fragment.
func TestData(t *testing.T) {
	var q chronograf.QueryConfig
	if err := json.Unmarshal([]byte(config), &q); err != nil {
		t.Errorf("Error unmarshalling %v", err)
	}
	alert := chronograf.AlertRule{
		Trigger: "deadman",
		Query:   &q,
	}
	tick, err := Data(alert)
	if err != nil {
		t.Errorf("Error creating tick %v", err)
		return
	}
	if _, err := formatTick(tick); err != nil {
		fmt.Print(tick)
		t.Errorf("Error formatting tick %v", err)
	}
}

View File

@ -1,12 +0,0 @@
package kapacitor
// Error is a kapacitor error arising from communication with kapacitor or
// from processing a TICKscript.
type Error string

// Error implements the error interface by returning the underlying string.
func (e Error) Error() string {
	return string(e)
}

// ErrNotChronoTickscript signals a TICKscript that cannot be parsed into
// chronograf data structure.
const ErrNotChronoTickscript = Error("TICKscript not built with chronograf builder")

View File

@ -1,15 +0,0 @@
package kapacitor
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// HTTPEndpoint is the name of the httpOut node through which triggered
// alert data is exposed over Kapacitor's HTTP API.
const HTTPEndpoint = "output"

// HTTPOut appends a kapacitor httpOut node to the generated TICKscript so
// alert data can be fetched over HTTP. The rule argument is accepted for
// signature symmetry with the other section generators and is unused.
func HTTPOut(rule chronograf.AlertRule) (string, error) {
	return "trigger|httpOut('" + HTTPEndpoint + "')", nil
}

View File

@ -1,34 +0,0 @@
package kapacitor
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// InfluxOut creates a kapacitor influxDBOut node to write alert data to
// Database, RP, Measurement. The emitted script references vars (outputDB,
// outputRP, outputMeasurement, name, triggerType, messageField,
// durationField) that are declared by the vars section of the script.
func InfluxOut(rule chronograf.AlertRule) (string, error) {
	// For some of the alert, the data needs to be renamed (normalized)
	// before being sent to influxdb: deadman alerts emit their result in
	// the "emitted" field rather than "value".
	rename := ""
	if rule.Trigger == "deadman" {
		rename = `|eval(lambda: "emitted")
		.as('value')
		.keep('value', messageField, durationField)`
	}
	return fmt.Sprintf(`
	trigger
	%s
	|eval(lambda: float("value"))
		.as('value')
		.keep()
	|influxDBOut()
		.create()
		.database(outputDB)
		.retentionPolicy(outputRP)
		.measurement(outputMeasurement)
		.tag('alertName', name)
		.tag('triggerType', triggerType)
	`, rename), nil
}

View File

@ -1,62 +0,0 @@
package kapacitor
import "testing"
import "github.com/influxdata/influxdb/v2/chronograf"
// TestInfluxOut asserts that the influxDBOut section generated for a
// deadman rule formats to the expected canonical tickscript (including the
// deadman-specific "emitted" -> "value" normalization).
func TestInfluxOut(t *testing.T) {
	tests := []struct {
		name string
		want chronograf.TICKScript
	}{
		{
			name: "Test influxDBOut kapacitor node",
			// NOTE(review): the expected string must match formatTick's
			// 4-space output style exactly.
			want: `trigger
    |eval(lambda: "emitted")
        .as('value')
        .keep('value', messageField, durationField)
    |eval(lambda: float("value"))
        .as('value')
        .keep()
    |influxDBOut()
        .create()
        .database(outputDB)
        .retentionPolicy(outputRP)
        .measurement(outputMeasurement)
        .tag('alertName', name)
        .tag('triggerType', triggerType)
`,
		},
	}
	for _, tt := range tests {
		got, err := InfluxOut(chronograf.AlertRule{
			Name:    "name",
			Trigger: "deadman",
			Query: &chronograf.QueryConfig{
				Fields: []chronograf.Field{
					{
						Value: "mean",
						Type:  "func",
						Args: []chronograf.Field{
							{
								Value: "usage_user",
								Type:  "field",
							},
						},
					},
				},
			},
		})
		if err != nil {
			t.Errorf("%q. InfluxOut()) error = %v", tt.name, err)
			continue
		}
		formatted, err := formatTick(got)
		if err != nil {
			t.Errorf("%q. formatTick() error = %v", tt.name, err)
			continue
		}
		if formatted != tt.want {
			t.Errorf("%q. InfluxOut() = %v, want %v", tt.name, formatted, tt.want)
		}
	}
}

View File

@ -1,113 +0,0 @@
package kapacitor
import (
"sync"
client "github.com/influxdata/kapacitor/client/v1"
)
const (
	// ListTaskWorkers describes the number of workers concurrently fetching
	// tasks from Kapacitor. This constant was chosen after some benchmarking
	// work and should likely work well for quad-core systems
	ListTaskWorkers = 4

	// TaskGatherers is the number of workers collating responses from
	// ListTaskWorkers. There can only be one without additional synchronization
	// around the output buffer from ListTasks
	TaskGatherers = 1
)

// ensure PaginatingKapaClient is a KapaClient
var _ KapaClient = &PaginatingKapaClient{}

// PaginatingKapaClient is a Kapacitor client that automatically navigates
// through Kapacitor's pagination to fetch all results
type PaginatingKapaClient struct {
	KapaClient
	FetchRate int // specifies the number of elements to fetch from Kapacitor at a time
}
// ListTasks lists all available tasks from Kapacitor, navigating pagination as
// it fetches them. It fans page requests out to ListTaskWorkers goroutines
// and collates their responses through a single gatherer goroutine.
func (p *PaginatingKapaClient) ListTasks(opts *client.ListTasksOptions) ([]client.Task, error) {
	// only trigger auto-pagination with Offset=0 and Limit=0
	if opts.Limit != 0 || opts.Offset != 0 {
		return p.KapaClient.ListTasks(opts)
	}

	allTasks := []client.Task{}

	optChan := make(chan client.ListTasksOptions)
	taskChan := make(chan []client.Task, ListTaskWorkers)
	done := make(chan struct{})

	// once guards close(done) so that whichever worker finishes first (or
	// errors) signals shutdown exactly once.
	var once sync.Once

	// generator feeds ever-increasing page offsets to the workers until
	// done is closed.
	go p.generateKapacitorOptions(optChan, *opts, done)

	var wg sync.WaitGroup

	wg.Add(ListTaskWorkers)
	for i := 0; i < ListTaskWorkers; i++ {
		go p.fetchFromKapacitor(optChan, &wg, &once, taskChan, done)
	}

	// Exactly one gatherer appends to allTasks; more would require
	// synchronizing access to the slice (see TaskGatherers).
	var gatherWg sync.WaitGroup
	gatherWg.Add(TaskGatherers)
	go func() {
		for task := range taskChan {
			allTasks = append(allTasks, task...)
		}
		gatherWg.Done()
	}()

	wg.Wait()
	close(taskChan)
	gatherWg.Wait()

	return allTasks, nil
}
// fetchFromKapacitor fetches a set of results from a kapacitor by reading a
// set of options from the provided optChan. Fetched tasks are pushed onto the
// provided taskChan. The worker exits when an empty page is seen or when
// ListTasks fails; in either case it signals done (once) so the option
// generator shuts down as well.
func (p *PaginatingKapaClient) fetchFromKapacitor(optChan chan client.ListTasksOptions, wg *sync.WaitGroup, closer *sync.Once, taskChan chan []client.Task, done chan struct{}) {
	defer wg.Done()
	for opt := range optChan {
		resp, err := p.KapaClient.ListTasks(&opt)
		if err != nil {
			// Fix: signal completion before returning. Previously an
			// erroring worker exited silently; if every worker errored,
			// the generator goroutine stayed blocked on optChan forever
			// (goroutine leak) because nothing ever closed done.
			closer.Do(func() {
				close(done)
			})
			return
		}

		// break and stop all workers if we're done
		if len(resp) == 0 {
			closer.Do(func() {
				close(done)
			})
			return
		}

		// handoff tasks to consumer
		taskChan <- resp
	}
}
// generateKapacitorOptions creates ListTasksOptions with incrementally greater
// Limit and Offset parameters, and inserts them into the provided optChan.
// It runs until done is closed, at which point it closes optChan to release
// any workers still ranging over it.
func (p *PaginatingKapaClient) generateKapacitorOptions(optChan chan client.ListTasksOptions, opts client.ListTasksOptions, done chan struct{}) {
	// ensure Limit and Offset start from known quantities
	opts.Limit = p.FetchRate
	opts.Offset = 0

	for {
		select {
		case <-done:
			close(optChan)
			return
		case optChan <- opts:
			// nop
		}

		// advance to the next page for the next send
		opts.Offset = p.FetchRate + opts.Offset
	}
}

View File

@ -1,57 +0,0 @@
package kapacitor_test
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf/kapacitor"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
client "github.com/influxdata/kapacitor/client/v1"
)
// Benchmarks across increasing task-list sizes so pagination overhead can be
// compared over several orders of magnitude.
func BenchmarkKapaClient100(b *testing.B)    { benchmark_PaginatingKapaClient(100, b) }
func BenchmarkKapaClient1000(b *testing.B)   { benchmark_PaginatingKapaClient(1000, b) }
func BenchmarkKapaClient10000(b *testing.B)  { benchmark_PaginatingKapaClient(10000, b) }
func BenchmarkKapaClient100000(b *testing.B) { benchmark_PaginatingKapaClient(100000, b) }

// tasks is a package-level sink that keeps the compiler from optimizing away
// the benchmarked ListTasks calls.
var tasks []client.Task
// benchmark_PaginatingKapaClient measures auto-pagination over a mocked
// Kapacitor holding taskCount tasks.
func benchmark_PaginatingKapaClient(taskCount int, b *testing.B) {
	b.StopTimer() // eliminate setup time

	// Mock kapacitor that serves slices of a fixed-size task list, honoring
	// Offset/Limit the way a real paginated API would.
	kapa := &mocks.KapaClient{
		ListTasksF: func(opts *client.ListTasksOptions) ([]client.Task, error) {
			all := make([]client.Task, taskCount)

			start := opts.Offset
			stop := opts.Offset + opts.Limit
			if stop > len(all) {
				stop = len(all)
			}
			if start > len(all) {
				start = stop
			}
			return all[start:stop], nil
		},
	}

	paginator := kapacitor.PaginatingKapaClient{
		KapaClient: kapa,
		FetchRate:  50,
	}

	opts := &client.ListTasksOptions{}

	b.StartTimer() // eliminate setup time

	// let the benchmark runner run ListTasks until it's satisfied
	for n := 0; n < b.N; n++ {
		// assignment to a package-level var avoids having the call optimized away
		tasks, _ = paginator.ListTasks(opts)
	}
}

View File

@ -1,61 +0,0 @@
package kapacitor_test
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf/kapacitor"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
client "github.com/influxdata/kapacitor/client/v1"
)
// Test_Kapacitor_PaginatingKapaClient verifies both behaviors of ListTasks:
// explicit Limit/Offset passes straight through to the underlying client,
// while zero values trigger auto-pagination that returns every task.
func Test_Kapacitor_PaginatingKapaClient(t *testing.T) {
	const lenAllTasks = 227 // prime, to stress odd result sets

	// create a mock client that will return a huge response from ListTasks
	mockClient := &mocks.KapaClient{
		ListTasksF: func(opts *client.ListTasksOptions) ([]client.Task, error) {
			// create all the tasks
			allTasks := []client.Task{}

			for i := 0; i < lenAllTasks; i++ {
				allTasks = append(allTasks, client.Task{})
			}

			// slice the response like a real paginated API would
			begin := opts.Offset
			end := opts.Offset + opts.Limit

			if end > len(allTasks) {
				end = len(allTasks)
			}

			if begin > len(allTasks) {
				begin = end
			}

			return allTasks[begin:end], nil
		},
	}

	pkap := kapacitor.PaginatingKapaClient{
		KapaClient: mockClient,
		FetchRate:  50,
	}

	opts := &client.ListTasksOptions{
		Limit:  100,
		Offset: 0,
	}

	// ensure 100 elems returned when calling mockClient directly
	tasks, _ := pkap.ListTasks(opts)

	if len(tasks) != 100 {
		t.Error("Expected calling KapaClient's ListTasks to return", opts.Limit, "items. Received:", len(tasks))
	}

	// ensure PaginatingKapaClient returns _all_ tasks with 0 value for Limit and Offset
	allOpts := &client.ListTasksOptions{}
	allTasks, _ := pkap.ListTasks(allOpts)

	if len(allTasks) != lenAllTasks {
		t.Error("PaginatingKapaClient: Expected to find", lenAllTasks, "tasks but found", len(allTasks))
	}
}

View File

@ -1,78 +0,0 @@
package kapacitor
import (
"fmt"
)
const (
	greaterThan      = "greater than"
	lessThan         = "less than"
	lessThanEqual    = "equal to or less than"
	greaterThanEqual = "equal to or greater"
	equal            = "equal to"
	notEqual         = "not equal to"
	insideRange      = "inside range"
	outsideRange     = "outside range"
)

// kapaOperator converts a UI comparison phrase into its kapacitor operator.
func kapaOperator(operator string) (string, error) {
	ops := map[string]string{
		greaterThan:      ">",
		lessThan:         "<",
		lessThanEqual:    "<=",
		greaterThanEqual: ">=",
		equal:            "==",
		notEqual:         "!=",
	}
	op, ok := ops[operator]
	if !ok {
		return "", fmt.Errorf("invalid operator: %s is unknown", operator)
	}
	return op, nil
}

// chronoOperator converts a kapacitor operator back into its UI phrase.
func chronoOperator(operator string) (string, error) {
	names := map[string]string{
		">":  greaterThan,
		"<":  lessThan,
		"<=": lessThanEqual,
		">=": greaterThanEqual,
		"==": equal,
		"!=": notEqual,
	}
	name, ok := names[operator]
	if !ok {
		return "", fmt.Errorf("invalid operator: %s is unknown", operator)
	}
	return name, nil
}

// rangeOperators expands a UI range phrase into the three tokens of a
// kapacitor range comparison: lower operator, joiner, upper operator.
func rangeOperators(operator string) ([]string, error) {
	switch operator {
	case insideRange:
		return []string{">=", "AND", "<="}, nil
	case outsideRange:
		return []string{"<", "OR", ">"}, nil
	}
	return nil, fmt.Errorf("invalid operator: %s is unknown", operator)
}

// chronoRangeOperators collapses a three-token kapacitor range comparison
// back into its UI phrase.
func chronoRangeOperators(ops []string) (string, error) {
	if len(ops) == 3 {
		switch {
		case ops[0] == ">=" && ops[1] == "AND" && ops[2] == "<=":
			return insideRange, nil
		case ops[0] == "<" && ops[1] == "OR" && ops[2] == ">":
			return outsideRange, nil
		}
	}
	return "", fmt.Errorf("unknown operators")
}

View File

@ -1,37 +0,0 @@
package kapacitor
import (
"bytes"
"encoding/json"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/kapacitor/pipeline"
totick "github.com/influxdata/kapacitor/pipeline/tick"
)
// MarshalTICK converts tickscript to its JSON pipeline representation.
func MarshalTICK(script string) ([]byte, error) {
	// The local was previously named "pipeline", shadowing the imported
	// pipeline package; renamed to avoid the collision.
	p, err := newPipeline(chronograf.TICKScript(script))
	if err != nil {
		return nil, err
	}
	return json.MarshalIndent(p, "", " ")
}
// UnmarshalTICK converts a JSON pipeline representation back to tickscript.
func UnmarshalTICK(octets []byte) (string, error) {
	pipe := &pipeline.Pipeline{}
	if err := pipe.Unmarshal(octets); err != nil {
		return "", err
	}

	// Rebuild an AST from the pipeline and render it as formatted script.
	astree := totick.AST{}
	if err := astree.Build(pipe); err != nil {
		return "", err
	}

	var rendered bytes.Buffer
	astree.Program.Format(&rendered, "", false)
	return rendered.String(), nil
}

View File

@ -1,341 +0,0 @@
package kapacitor
import (
"fmt"
"testing"
"github.com/sergi/go-diff/diffmatchpatch"
)
// TestPipelineJSON round-trips a chronograf-generated threshold tickscript
// through MarshalTICK (script -> JSON) and UnmarshalTICK (JSON -> script)
// and asserts the result matches kapacitor's canonical formatting.
func TestPipelineJSON(t *testing.T) {
	script := `var db = 'telegraf'
var rp = 'autogen'
var measurement = 'cpu'
var groupBy = ['host', 'cluster_id']
var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod')
var period = 10m
var every = 30s
var name = 'name'
var idVar = name + ':{{.Group}}'
var message = 'message'
var idTag = 'alertID'
var levelTag = 'level'
var messageField = 'message'
var durationField = 'duration'
var outputDB = 'chronograf'
var outputRP = 'autogen'
var outputMeasurement = 'alerts'
var triggerType = 'threshold'
var crit = 90
var data = stream
|from()
.database(db)
.retentionPolicy(rp)
.measurement(measurement)
.groupBy(groupBy)
.where(whereFilter)
|window()
.period(period)
.every(every)
.align()
|mean('usage_user')
.as('value')
var trigger = data
|alert()
.crit(lambda: "value" > crit)
.stateChangesOnly()
.message(message)
.id(idVar)
.idTag(idTag)
.levelTag(levelTag)
.messageField(messageField)
.durationField(durationField)
.slack()
.victorOps()
.email()
trigger
|influxDBOut()
.create()
.database(outputDB)
.retentionPolicy(outputRP)
.measurement(outputMeasurement)
.tag('alertName', name)
.tag('triggerType', triggerType)
trigger
|httpOut('output')
`

	// want must match UnmarshalTICK's formatter output exactly (4-space
	// node indentation, inlined var values, generated node names).
	want := `var alert4 = stream
    |from()
        .database('telegraf')
        .retentionPolicy('autogen')
        .measurement('cpu')
        .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod')
        .groupBy('host', 'cluster_id')
    |window()
        .period(10m)
        .every(30s)
        .align()
    |mean('usage_user')
        .as('value')
    |alert()
        .id('name:{{.Group}}')
        .message('message')
        .details('{{ json . }}')
        .crit(lambda: "value" > 90)
        .history(21)
        .levelTag('level')
        .messageField('message')
        .durationField('duration')
        .idTag('alertID')
        .stateChangesOnly()
        .email()
        .victorOps()
        .slack()

alert4
    |httpOut('output')

alert4
    |influxDBOut()
        .database('chronograf')
        .retentionPolicy('autogen')
        .measurement('alerts')
        .buffer(1000)
        .flushInterval(10s)
        .create()
        .tag('alertName', 'name')
        .tag('triggerType', 'threshold')
`
	octets, err := MarshalTICK(script)
	if err != nil {
		t.Fatalf("MarshalTICK unexpected error %v", err)
	}

	got, err := UnmarshalTICK(octets)
	if err != nil {
		t.Fatalf("UnmarshalTICK unexpected error %v", err)
	}

	if got != want {
		fmt.Println(got)
		diff := diffmatchpatch.New()
		delta := diff.DiffMain(want, got, true)
		t.Errorf("%s", diff.DiffPrettyText(delta))
	}
}
// TestPipelineJSONDeadman round-trips a deadman tickscript through
// MarshalTICK/UnmarshalTICK. Two acceptable outputs exist because the
// relative ordering of the httpOut and influxDBOut branches is not
// deterministic in the rebuilt pipeline.
func TestPipelineJSONDeadman(t *testing.T) {
	script := `var db = 'telegraf'
var rp = 'autogen'
var measurement = 'cpu'
var groupBy = ['host', 'cluster_id']
var whereFilter = lambda: ("cpu" == 'cpu_total') AND ("host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod')
var period = 10m
var name = 'name'
var idVar = name + ':{{.Group}}'
var message = 'message'
var idTag = 'alertID'
var levelTag = 'level'
var messageField = 'message'
var durationField = 'duration'
var outputDB = 'chronograf'
var outputRP = 'autogen'
var outputMeasurement = 'alerts'
var triggerType = 'deadman'
var threshold = 0.0
var data = stream
|from()
.database(db)
.retentionPolicy(rp)
.measurement(measurement)
.groupBy(groupBy)
.where(whereFilter)
var trigger = data
|deadman(threshold, period)
.stateChangesOnly()
.message(message)
.id(idVar)
.idTag(idTag)
.levelTag(levelTag)
.messageField(messageField)
.durationField(durationField)
.slack()
.victorOps()
.email()
trigger
|eval(lambda: "emitted")
.as('value')
.keep('value', messageField, durationField)
|influxDBOut()
.create()
.database(outputDB)
.retentionPolicy(outputRP)
.measurement(outputMeasurement)
.tag('alertName', name)
.tag('triggerType', triggerType)
trigger
|httpOut('output')
`

	// wantA: httpOut branch rendered before influxDBOut.
	wantA := `var from1 = stream
    |from()
        .database('telegraf')
        .retentionPolicy('autogen')
        .measurement('cpu')
        .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod')
        .groupBy('host', 'cluster_id')

var alert5 = from1
    |stats(10m)
        .align()
    |derivative('emitted')
        .as('emitted')
        .unit(10m)
        .nonNegative()
    |alert()
        .id('name:{{.Group}}')
        .message('message')
        .details('{{ json . }}')
        .crit(lambda: "emitted" <= 0.0)
        .history(21)
        .levelTag('level')
        .messageField('message')
        .durationField('duration')
        .idTag('alertID')
        .stateChangesOnly()
        .email()
        .victorOps()
        .slack()

alert5
    |httpOut('output')

alert5
    |eval(lambda: "emitted")
        .as('value')
        .tags()
        .keep('value', 'message', 'duration')
    |influxDBOut()
        .database('chronograf')
        .retentionPolicy('autogen')
        .measurement('alerts')
        .buffer(1000)
        .flushInterval(10s)
        .create()
        .tag('alertName', 'name')
        .tag('triggerType', 'deadman')
`

	// wantB: same pipeline with the two output branches swapped.
	wantB := `var from1 = stream
    |from()
        .database('telegraf')
        .retentionPolicy('autogen')
        .measurement('cpu')
        .where(lambda: "cpu" == 'cpu_total' AND "host" == 'acc-0eabc309-eu-west-1-data-3' OR "host" == 'prod')
        .groupBy('host', 'cluster_id')

var alert5 = from1
    |stats(10m)
        .align()
    |derivative('emitted')
        .as('emitted')
        .unit(10m)
        .nonNegative()
    |alert()
        .id('name:{{.Group}}')
        .message('message')
        .details('{{ json . }}')
        .crit(lambda: "emitted" <= 0.0)
        .history(21)
        .levelTag('level')
        .messageField('message')
        .durationField('duration')
        .idTag('alertID')
        .stateChangesOnly()
        .email()
        .victorOps()
        .slack()

alert5
    |eval(lambda: "emitted")
        .as('value')
        .tags()
        .keep('value', 'message', 'duration')
    |influxDBOut()
        .database('chronograf')
        .retentionPolicy('autogen')
        .measurement('alerts')
        .buffer(1000)
        .flushInterval(10s)
        .create()
        .tag('alertName', 'name')
        .tag('triggerType', 'deadman')

alert5
    |httpOut('output')
`
	octets, err := MarshalTICK(script)
	if err != nil {
		t.Fatalf("MarshalTICK unexpected error %v", err)
	}
	got, err := UnmarshalTICK(octets)
	if err != nil {
		t.Fatalf("UnmarshalTICK unexpected error %v", err)
	}
	if got != wantA && got != wantB {
		want := wantA
		fmt.Println("got")
		fmt.Println(got)
		fmt.Println("want")
		fmt.Println(want)
		diff := diffmatchpatch.New()
		delta := diff.DiffMain(want, got, true)
		t.Errorf("%s", diff.DiffPrettyText(delta))
	}
}

View File

@ -1,50 +0,0 @@
package kapacitor
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
var _ chronograf.Ticker = &Alert{}

// Alert defines alerting strings in template rendering
type Alert struct{}

// Generate creates a Tickscript from the alertrule by rendering each script
// section in order (vars, data source, trigger, alert services, influx
// output, http output), concatenating them, then formatting and validating
// the combined script.
func (a *Alert) Generate(rule chronograf.AlertRule) (chronograf.TICKScript, error) {
	vars, err := Vars(rule)
	if err != nil {
		return "", err
	}
	data, err := Data(rule)
	if err != nil {
		return "", err
	}
	trigger, err := Trigger(rule)
	if err != nil {
		return "", err
	}
	services, err := AlertServices(rule)
	if err != nil {
		return "", err
	}
	output, err := InfluxOut(rule)
	if err != nil {
		return "", err
	}
	http, err := HTTPOut(rule)
	if err != nil {
		return "", err
	}
	// trigger and services are deliberately joined without a separator:
	// the services are chained method calls on the trigger's alert node.
	raw := fmt.Sprintf("%s\n%s\n%s%s\n%s\n%s", vars, data, trigger, services, output, http)
	tick, err := formatTick(raw)
	if err != nil {
		return "", err
	}
	// Return the formatted script even when validation fails so callers
	// can surface it for debugging.
	if err := validateTick(tick); err != nil {
		return tick, err
	}
	return tick, nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,162 +0,0 @@
package kapacitor
import (
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
const (
	// Deadman triggers when data is missing for a period of time
	Deadman = "deadman"
	// Relative triggers when the value has changed compared to the past
	Relative = "relative"
	// Threshold triggers when value crosses a threshold
	Threshold = "threshold"
	// ThresholdRange triggers when a value is inside or outside a range
	ThresholdRange = "range"
	// ChangePercent triggers a relative alert when value changed by a percentage
	ChangePercent = "% change"
	// ChangeAmount triggers a relative alert when the value change by some amount
	ChangeAmount = "change"
)

// AllAlerts are properties all alert types will have; the referenced vars
// (message, idVar, idTag, ...) are declared by the vars section.
var AllAlerts = `
	.message(message)
	.id(idVar)
	.idTag(idTag)
	.levelTag(levelTag)
	.messageField(messageField)
	.durationField(durationField)
`

// Details is used only for alerts that specify detail string
var Details = `
	.details(details)
`

// ThresholdTrigger is the tickscript trigger for alerts that exceed a value;
// the %s verb receives the kapacitor comparison operator.
var ThresholdTrigger = `
	var trigger = data
	|alert()
	.crit(lambda: "value" %s crit)
`

// ThresholdRangeTrigger is the alert when data does not intersect the range.
// Its three %s verbs receive lower operator, joiner (AND/OR), upper operator.
var ThresholdRangeTrigger = `
	var trigger = data
	|alert()
	.crit(lambda: "value" %s lower %s "value" %s upper)
`

// RelativeAbsoluteTrigger compares one window of data versus another (current - past)
var RelativeAbsoluteTrigger = `
	var past = data
	|shift(shift)
	var current = data
	var trigger = past
	|join(current)
	.as('past', 'current')
	|eval(lambda: float("current.value" - "past.value"))
	.keep()
	.as('value')
	|alert()
	.crit(lambda: "value" %s crit)
`

// RelativePercentTrigger compares one window of data versus another as a percent change.
var RelativePercentTrigger = `
	var past = data
	|shift(shift)
	var current = data
	var trigger = past
	|join(current)
	.as('past', 'current')
	|eval(lambda: abs(float("current.value" - "past.value"))/float("past.value") * 100.0)
	.keep()
	.as('value')
	|alert()
	.crit(lambda: "value" %s crit)
`

// DeadmanTrigger checks if any data has been streamed in the last period of time
var DeadmanTrigger = `
	var trigger = data|deadman(threshold, period)
`
// Trigger returns the trigger mechanism section of a tickscript for rule,
// selected by rule.Trigger, with the shared alert properties (and optional
// details) appended.
func Trigger(rule chronograf.AlertRule) (string, error) {
	var trigger string
	var err error

	switch rule.Trigger {
	case Deadman:
		trigger = DeadmanTrigger
	case Relative:
		trigger, err = relativeTrigger(rule)
	case Threshold:
		// A non-empty RangeValue means a two-bound (range) threshold.
		if rule.TriggerValues.RangeValue == "" {
			trigger, err = thresholdTrigger(rule)
		} else {
			trigger, err = thresholdRangeTrigger(rule)
		}
	default:
		err = fmt.Errorf("unknown trigger type: %s", rule.Trigger)
	}
	if err != nil {
		return "", err
	}

	// Only add stateChangesOnly to new rules
	if rule.ID == "" {
		trigger += `
	.stateChangesOnly()
	`
	}

	trigger += AllAlerts

	if rule.Details != "" {
		trigger += Details
	}

	return trigger, nil
}
// relativeTrigger renders the relative (current vs. past window) trigger,
// choosing the percent or absolute template from the rule's change type.
func relativeTrigger(rule chronograf.AlertRule) (string, error) {
	op, err := kapaOperator(rule.TriggerValues.Operator)
	if err != nil {
		return "", err
	}
	switch rule.TriggerValues.Change {
	case ChangePercent:
		return fmt.Sprintf(RelativePercentTrigger, op), nil
	case ChangeAmount:
		return fmt.Sprintf(RelativeAbsoluteTrigger, op), nil
	default:
		return "", fmt.Errorf("unknown change type %s", rule.TriggerValues.Change)
	}
}
// thresholdTrigger renders the single-bound threshold trigger, translating
// the rule's UI operator phrase into a kapacitor comparison operator.
func thresholdTrigger(rule chronograf.AlertRule) (string, error) {
	op, err := kapaOperator(rule.TriggerValues.Operator)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf(ThresholdTrigger, op), nil
}
// thresholdRangeTrigger renders the two-bound (inside/outside range)
// threshold trigger.
func thresholdRangeTrigger(rule chronograf.AlertRule) (string, error) {
	ops, err := rangeOperators(rule.TriggerValues.Operator)
	if err != nil {
		return "", err
	}
	// rangeOperators always yields exactly three tokens:
	// lower operator, joiner (AND/OR), upper operator.
	return fmt.Sprintf(ThresholdRangeTrigger, ops[0], ops[1], ops[2]), nil
}

View File

@ -1,142 +0,0 @@
package kapacitor
import (
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestTrigger exercises Trigger for every trigger type, asserting the
// formatted tickscript output, plus the unknown-trigger error path.
func TestTrigger(t *testing.T) {
	tests := []struct {
		name    string
		rule    chronograf.AlertRule
		want    string
		wantErr bool
	}{
		{
			name: "Test Deadman",
			rule: chronograf.AlertRule{
				Trigger: "deadman",
			},
			want: `var trigger = data
    |deadman(threshold, period)
        .stateChangesOnly()
        .message(message)
        .id(idVar)
        .idTag(idTag)
        .levelTag(levelTag)
        .messageField(messageField)
        .durationField(durationField)
`,
			wantErr: false,
		},
		{
			// NOTE(review): the names of this case and the next appear
			// swapped relative to their Change values — confirm intent.
			name: "Test Relative",
			rule: chronograf.AlertRule{
				Trigger: "relative",
				TriggerValues: chronograf.TriggerValues{
					Operator: "greater than",
					Change:   "% change",
				},
			},
			want: `var past = data
    |shift(shift)

var current = data

var trigger = past
    |join(current)
        .as('past', 'current')
    |eval(lambda: abs(float("current.value" - "past.value")) / float("past.value") * 100.0)
        .keep()
        .as('value')
    |alert()
        .crit(lambda: "value" > crit)
        .stateChangesOnly()
        .message(message)
        .id(idVar)
        .idTag(idTag)
        .levelTag(levelTag)
        .messageField(messageField)
        .durationField(durationField)
`,
			wantErr: false,
		},
		{
			name: "Test Relative percent change",
			rule: chronograf.AlertRule{
				Trigger: "relative",
				TriggerValues: chronograf.TriggerValues{
					Operator: "greater than",
					Change:   "change",
				},
			},
			want: `var past = data
    |shift(shift)

var current = data

var trigger = past
    |join(current)
        .as('past', 'current')
    |eval(lambda: float("current.value" - "past.value"))
        .keep()
        .as('value')
    |alert()
        .crit(lambda: "value" > crit)
        .stateChangesOnly()
        .message(message)
        .id(idVar)
        .idTag(idTag)
        .levelTag(levelTag)
        .messageField(messageField)
        .durationField(durationField)
`,
			wantErr: false,
		},
		{
			name: "Test Threshold",
			rule: chronograf.AlertRule{
				Trigger: "threshold",
				TriggerValues: chronograf.TriggerValues{
					Operator: "greater than",
				},
			},
			want: `var trigger = data
    |alert()
        .crit(lambda: "value" > crit)
        .stateChangesOnly()
        .message(message)
        .id(idVar)
        .idTag(idTag)
        .levelTag(levelTag)
        .messageField(messageField)
        .durationField(durationField)
`,
			wantErr: false,
		},
		{
			name: "Test Invalid",
			rule: chronograf.AlertRule{
				Trigger: "invalid",
			},
			want:    ``,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		got, err := Trigger(tt.rule)
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. Trigger() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		formatted, err := formatTick(got)
		if err != nil {
			t.Errorf("%q. formatTick() error = %v", tt.name, err)
			continue
		}
		if string(formatted) != tt.want {
			t.Errorf("%q. Trigger() = \n%v\n want \n%v\n", tt.name, string(formatted), tt.want)
		}
	}
}

View File

@ -1,67 +0,0 @@
package kapacitor
import (
"bytes"
"fmt"
"strings"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
// ValidateAlert checks whether service is a valid kapacitor alert service
// chain by splicing it into a minimal tickscript and compiling a pipeline
// from it. A nil error means the service is valid — at least with this
// version of kapacitor.
func ValidateAlert(service string) error {
	script := "stream|from()|alert()" + service
	return validateTick(chronograf.TICKScript(script))
}
// formatTick parses tickscript and pretty-prints it in kapacitor's
// canonical format; parse failures are returned as errors.
func formatTick(tickscript string) (chronograf.TICKScript, error) {
	parsed, err := ast.Parse(tickscript)
	if err != nil {
		return "", err
	}
	var out bytes.Buffer
	parsed.Format(&out, "", true)
	return chronograf.TICKScript(out.String()), nil
}
// validateTick reports whether script can be compiled into a kapacitor
// pipeline; a nil error means the script is valid.
func validateTick(script chronograf.TICKScript) error {
	_, err := newPipeline(script)
	return err
}
// newPipeline compiles script into a kapacitor pipeline, inferring the edge
// type from whether the script mentions "batch" anywhere.
func newPipeline(script chronograf.TICKScript) (*pipeline.Pipeline, error) {
	src := string(script)

	edge := pipeline.StreamEdge
	if strings.Contains(src, "batch") {
		edge = pipeline.BatchEdge
	}

	// Fresh scope, no predefined vars, and a stub deadman service.
	return pipeline.CreatePipeline(src, edge, stateful.NewScope(), &deadman{}, map[string]tick.Var{})
}
// deadman is an empty implementation of a kapacitor DeadmanService to allow CreatePipeline
var _ pipeline.DeadmanService = &deadman{}

// deadman supplies zero-valued deadman defaults; pipeline compilation only
// needs the interface satisfied, not meaningful values.
type deadman struct {
	interval  time.Duration
	threshold float64
	id        string
	message   string
	global    bool
}

// The accessors below simply expose the struct's (zero-valued) fields.
func (d deadman) Interval() time.Duration { return d.interval }
func (d deadman) Threshold() float64      { return d.threshold }
func (d deadman) Id() string              { return d.id }
func (d deadman) Message() string         { return d.message }
func (d deadman) Global() bool            { return d.global }

View File

@ -1,52 +0,0 @@
package kapacitor
import "testing"
import "github.com/influxdata/influxdb/v2/chronograf"
func TestValidateAlert(t *testing.T) {
tests := []struct {
name string
service string
wantErr bool
}{
{
name: "Test valid template alert",
service: ".slack()",
wantErr: false,
},
{
name: "Test invalid template alert",
service: ".invalid()",
wantErr: true,
},
}
for _, tt := range tests {
if err := ValidateAlert(tt.service); (err != nil) != tt.wantErr {
t.Errorf("%q. ValidateAlert() error = %v, wantErr %v", tt.name, err, tt.wantErr)
}
}
}
func Test_validateTick(t *testing.T) {
tests := []struct {
name string
script chronograf.TICKScript
wantErr bool
}{
{
name: "Valid Script",
script: "stream|from()",
wantErr: false,
},
{
name: "Invalid Script",
script: "stream|nothing",
wantErr: true,
},
}
for _, tt := range tests {
if err := validateTick(tt.script); (err != nil) != tt.wantErr {
t.Errorf("%q. validateTick() error = %v, wantErr %v", tt.name, err, tt.wantErr)
}
}
}

View File

@ -1,271 +0,0 @@
package kapacitor
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/influxdata/influxdb/v2/chronograf"
)
var (
	// Database is the output database for alerts.
	Database = "chronograf"
	// RP will be autogen for alerts because it is default.
	RP = "autogen"
	// Measurement will be alerts so that the app knows where to get this data.
	Measurement = "alerts"
	// IDTag is the output tag key for the ID of the alert
	IDTag = "alertID"
	// LevelTag is the output tag key for the alert level information
	LevelTag = "level"
	// MessageField is the output field key for the message in the alert
	MessageField = "message"
	// DurationField is the output field key for the duration of the alert
	DurationField = "duration"
)
// Vars builds the top level vars for a kapacitor alert script, appending
// the trigger-specific threshold/shift/crit declarations to the common vars.
func Vars(rule chronograf.AlertRule) (string, error) {
	common, err := commonVars(rule)
	if err != nil {
		return "", err
	}

	switch rule.Trigger {
	case Threshold:
		if rule.TriggerValues.RangeValue == "" {
			vars := `
	%s
	var crit = %s
	`
			return fmt.Sprintf(vars, common, formatValue(rule.TriggerValues.Value)), nil
		}
		vars := `
	%s
	var lower = %s
	var upper = %s
	`
		// Fix: run both range bounds through formatValue, matching the
		// single-threshold crit above. Numeric bounds are emitted
		// unchanged; non-numeric bounds are now quoted instead of
		// producing an unparsable script.
		return fmt.Sprintf(vars,
			common,
			formatValue(rule.TriggerValues.Value),
			formatValue(rule.TriggerValues.RangeValue)), nil
	case Relative:
		vars := `
	%s
	var shift = %s
	var crit = %s
	`
		return fmt.Sprintf(vars,
			common,
			rule.TriggerValues.Shift,
			rule.TriggerValues.Value,
		), nil
	case Deadman:
		vars := `
	%s
	var threshold = %s
	`
		return fmt.Sprintf(vars,
			common,
			"0.0", // deadman threshold hardcoded to zero
		), nil
	default:
		return "", fmt.Errorf("unknown trigger mechanism")
	}
}
// NotEmpty accumulates the first validation failure across a sequence of
// empty-string checks; once an error is recorded, later checks are no-ops.
type NotEmpty struct {
	Err error // first validation failure; nil while all checks have passed
}

// Valid records an error (using name in the message) when s is empty and
// returns the first error recorded so far.
func (n *NotEmpty) Valid(name, s string) error {
	if n.Err == nil && s == "" {
		n.Err = fmt.Errorf("%s cannot be an empty string", name)
	}
	return n.Err
}
// Escape sanitizes a string for embedding in a kapacitor single-quoted
// string literal by backslash-escaping any single quotes it contains.
func Escape(str string) string {
	// strings.ReplaceAll is the modern spelling of Replace(..., -1).
	return strings.ReplaceAll(str, "'", `\'`)
}
// commonVars renders the var declarations shared by every alert script:
// source query identifiers, windowing, alert metadata, and the output
// destination. All user-controlled strings are run through Escape so an
// embedded single quote cannot break out of a kapacitor string literal.
func commonVars(rule chronograf.AlertRule) (string, error) {
	n := new(NotEmpty)
	n.Valid("database", rule.Query.Database)
	n.Valid("retention policy", rule.Query.RetentionPolicy)
	n.Valid("measurement", rule.Query.Measurement)
	n.Valid("alert name", rule.Name)
	n.Valid("trigger type", rule.Trigger)
	if n.Err != nil {
		return "", n.Err
	}

	wind, err := window(rule)
	if err != nil {
		return "", err
	}

	common := `
	var db = '%s'
	var rp = '%s'
	var measurement = '%s'
	var groupBy = %s
	var whereFilter = %s
	%s
	var name = '%s'
	var idVar = %s
	var message = '%s'
	var idTag = '%s'
	var levelTag = '%s'
	var messageField = '%s'
	var durationField = '%s'
	var outputDB = '%s'
	var outputRP = '%s'
	var outputMeasurement = '%s'
	var triggerType = '%s'
	`
	res := fmt.Sprintf(common,
		Escape(rule.Query.Database),
		Escape(rule.Query.RetentionPolicy),
		Escape(rule.Query.Measurement),
		groupBy(rule.Query),
		whereFilter(rule.Query),
		wind,
		Escape(rule.Name),
		idVar(rule.Query),
		Escape(rule.Message),
		IDTag,
		LevelTag,
		MessageField,
		DurationField,
		Database,
		RP,
		Measurement,
		rule.Trigger,
	)

	if rule.Details != "" {
		// Fix: Details was previously interpolated unescaped, unlike every
		// other user-provided string; a single quote in the details text
		// would corrupt the generated script.
		res += fmt.Sprintf(`
	var details = '%s'
	`, Escape(rule.Details))
	}
	return res, nil
}
// window returns the period/every var declarations needed when the alert
// windows its data: always for deadman, and for threshold/relative rules
// only when the query aggregates a field. Returns "" when no windowing
// applies.
func window(rule chronograf.AlertRule) (string, error) {
	if rule.Trigger == Deadman {
		if rule.TriggerValues.Period == "" {
			return "", fmt.Errorf("period cannot be an empty string in deadman alert")
		}
		return fmt.Sprintf("var period = %s", rule.TriggerValues.Period), nil
	}

	// Windowing only makes sense if a field has been aggregated over a
	// group-by time duration.
	aggregated := false
	for _, f := range rule.Query.Fields {
		if f.Type == "func" {
			aggregated = true
			break
		}
	}
	if !aggregated {
		return "", nil
	}

	n := new(NotEmpty)
	n.Valid("group by time", rule.Query.GroupBy.Time)
	n.Valid("every", rule.Every)
	if n.Err != nil {
		return "", n.Err
	}
	return fmt.Sprintf("var period = %s\nvar every = %s", rule.Query.GroupBy.Time, rule.Every), nil
}
// groupBy renders the query's group-by tags as a tickscript list of
// single-quoted names; a nil query or no tags yields "[]".
func groupBy(q *chronograf.QueryConfig) string {
	if q == nil {
		return "[]"
	}
	quoted := make([]string, 0, len(q.GroupBy.Tags))
	for _, tag := range q.GroupBy.Tags {
		quoted = append(quoted, "'"+tag+"'")
	}
	return "[" + strings.Join(quoted, ",") + "]"
}
// idVar returns the tickscript expression for the alert ID: when the query
// groups by tags the group key is appended so each group gets a distinct
// alert ID; otherwise the bare rule name is used.
func idVar(q *chronograf.QueryConfig) string {
	if len(q.GroupBy.Tags) > 0 {
		return `name + ':{{.Group}}'`
	}
	return "name"
}
// field extracts the single field name referenced by the query: either the
// field itself, or — for an aggregate — the name of its first "field"-typed
// argument. Errors when the query has no fields or more than one.
func field(q *chronograf.QueryConfig) (string, error) {
	if q == nil {
		return "", fmt.Errorf("no fields set in query")
	}
	if len(q.Fields) != 1 {
		return "", fmt.Errorf("expect only one field but found %d", len(q.Fields))
	}

	fld := q.Fields[0]
	if fld.Type != "func" {
		name, ok := fld.Value.(string)
		if !ok {
			return "", fmt.Errorf("field value %v is should be string but is %T", fld.Value, fld.Value)
		}
		return name, nil
	}

	// Aggregate: the field name lives in the first "field"-typed argument.
	for _, arg := range fld.Args {
		if arg.Type != "field" {
			continue
		}
		name, ok := arg.Value.(string)
		if !ok {
			return "", fmt.Errorf("field value %v is should be string but is %T", arg.Value, arg.Value)
		}
		return name, nil
	}
	return "", fmt.Errorf("no fields set in query")
}
// whereFilter renders the query's tag filters as a TICKscript lambda.
// Returns "lambda: TRUE" when there is nothing to filter on.
func whereFilter(q *chronograf.QueryConfig) string {
	if q == nil {
		return "lambda: TRUE"
	}

	op := "=="
	if !q.AreTagsAccepted {
		op = "!="
	}

	clauses := []string{}
	for tag, values := range q.Tags {
		comparisons := make([]string, 0, len(values))
		for _, v := range values {
			comparisons = append(comparisons, fmt.Sprintf(`"%s" %s '%s'`, tag, op, v))
		}
		clauses = append(clauses, "("+strings.Join(comparisons, " OR ")+")")
	}
	if len(clauses) == 0 {
		return "lambda: TRUE"
	}

	// Map iteration order is random; sort for a deterministic script.
	sort.Strings(clauses)
	return "lambda: " + strings.Join(clauses, " AND ")
}
// formatValue returns value unchanged when it parses as a number or is a
// kapacitor boolean literal; otherwise it is escaped and returned as a
// kapacitor-formatted single-quoted string.
func formatValue(value string) string {
	// Numeric values pass through untouched.
	if _, err := strconv.ParseFloat(value, 64); err == nil {
		return value
	}
	// Kapacitor boolean literals also pass through untouched.
	switch value {
	case "TRUE", "FALSE":
		return value
	}
	return "'" + Escape(value) + "'"
}

View File

@ -1,87 +0,0 @@
package kapacitor
import (
"fmt"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestVarsCritStringEqual verifies that a threshold rule comparing a raw
// string field ("equal to" "DOWN") produces a var block that formats and
// validates as TICKscript.
func TestVarsCritStringEqual(t *testing.T) {
	alert := chronograf.AlertRule{
		Name:    "name",
		Trigger: "threshold",
		TriggerValues: chronograf.TriggerValues{
			Operator: "equal to",
			Value:    "DOWN",
		},
		Every: "30s",
		Query: &chronograf.QueryConfig{
			Database:        "telegraf",
			Measurement:     "haproxy",
			RetentionPolicy: "autogen",
			Fields: []chronograf.Field{
				{
					Value: "status",
					Type:  "field",
				},
			},
			GroupBy: chronograf.GroupBy{
				Time: "10m",
				Tags: []string{"pxname"},
			},
			AreTagsAccepted: true,
		},
	}

	raw, err := Vars(alert)
	if err != nil {
		// NOTE(review): this Printf duplicates the raw script already
		// included in Fatalf below; consider removing it (kept because
		// it is this file's only use of the fmt import).
		fmt.Printf("%s", raw)
		t.Fatalf("Error generating alert: %v %s", err, raw)
	}

	// The generated vars must both format and validate as TICKscript.
	tick, err := formatTick(raw)
	if err != nil {
		t.Errorf("Error formatting alert: %v %s", err, raw)
	}
	if err := validateTick(tick); err != nil {
		t.Errorf("Error validating alert: %v %s", err, tick)
	}
}
// Test_formatValue covers the three formatting classes: numerics and
// booleans pass through, everything else is escaped and single-quoted.
func Test_formatValue(t *testing.T) {
	cases := []struct {
		name  string
		value string
		want  string
	}{
		{name: "parses floats", value: "3.14", want: "3.14"},
		{name: "parses booleans", value: "TRUE", want: "TRUE"},
		{name: "single quotes for strings", value: "up", want: "'up'"},
		{name: "handles escaping of single quotes", value: "down's", want: `'down\'s'`},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := formatValue(tc.value)
			if got != tc.want {
				t.Errorf("formatValue() = %v, want %v", got, tc.want)
			}
		})
	}
}

View File

@ -1,26 +0,0 @@
# List any generated files here
TARGETS =

# List any source files used to generate the targets here
SOURCES =

# List any directories that have their own Makefile here
SUBDIRS = dist server canned

# Default target
all: $(SUBDIRS) $(TARGETS)

# Recurse into subdirs for same make goal
$(SUBDIRS):
	$(MAKE) -C $@ $(MAKECMDGOALS)

# Clean all targets recursively
clean: $(SUBDIRS)
	rm -f $(TARGETS)

# Define go generate if not already defined.
# BUG FIX: was `:=`, which always overwrote any externally-supplied value,
# contradicting this comment; `?=` assigns only when unset.
GO_GENERATE ?= go generate

# Run go generate for the targets
$(TARGETS): $(SOURCES)
	$(GO_GENERATE) -x

.PHONY: all clean $(SUBDIRS)

View File

@ -1,94 +0,0 @@
package bolt
import (
"time"
bolt "go.etcd.io/bbolt"
)
// SchemaVersionBucket stores ids of completed migrations
var SchemaVersionBucket = []byte("SchemaVersions")

// IsMigrationComplete checks for the presence of a particular migration id.
// The boolean result is meaningful only when the returned error is nil.
func IsMigrationComplete(db *bolt.DB, id string) (bool, error) {
	complete := false
	if err := db.View(func(tx *bolt.Tx) error {
		// Presence of the key means the migration already ran.
		if tx.Bucket(SchemaVersionBucket).Get([]byte(id)) != nil {
			complete = true
		}
		return nil
	}); err != nil {
		// BUG FIX: previously returned `true, err`, a misleading partial
		// result; return the zero value alongside the error instead.
		return false, err
	}
	return complete, nil
}
// MarkMigrationAsComplete adds the migration id to the schema bucket,
// recording the completion time as an RFC3339 UTC timestamp.
func MarkMigrationAsComplete(db *bolt.DB, id string) error {
	return db.Update(func(tx *bolt.Tx) error {
		stamp := time.Now().UTC().Format(time.RFC3339)
		return tx.Bucket(SchemaVersionBucket).Put([]byte(id), []byte(stamp))
	})
}
// Migration defines a database state/schema transition.
//
//	ID:   After the migration is run, this id is stored in the database.
//	      We don't want to run a state transition twice.
//	Up:   The forward-transition function. After a version upgrade, a number
//	      of these will run on database startup in order to bring a user's
//	      schema in line with struct definitions in the new version.
//	Down: The backward-transition function. We don't expect these to be
//	      run on a user's database -- if the user needs to rollback
//	      to a previous version, it will be easier for them to replace
//	      their current database with one of their backups. The primary
//	      purpose of a Down() function is to help contributors move across
//	      development branches that have different schema definitions.
type Migration struct {
	ID   string
	Up   func(db *bolt.DB) error
	Down func(db *bolt.DB) error
}
// Migrate runs one migration's Up() function, if it has not already been run.
// Successful runs are recorded so subsequent startups skip this migration.
func (m Migration) Migrate(client *Client) error {
	done, err := IsMigrationComplete(client.db, m.ID)
	if err != nil {
		return err
	}
	if done {
		return nil
	}

	if client.logger != nil {
		client.logger.Info("Running migration ", m.ID, "")
	}
	if err := m.Up(client.db); err != nil {
		return err
	}
	return MarkMigrationAsComplete(client.db, m.ID)
}
// MigrateAll iterates through all known migrations and runs them in order,
// stopping at the first failure.
func MigrateAll(client *Client) error {
	for _, migration := range migrations {
		if err := migration.Migrate(client); err != nil {
			return err
		}
	}
	return nil
}

// migrations lists every known migration, in the order they must run.
var migrations = []Migration{
	changeIntervalToDuration,
}

View File

@ -1,50 +0,0 @@
package bolt_test
import (
"context"
"errors"
"io/ioutil"
"os"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt"
"github.com/influxdata/influxdb/v2/chronograf/mocks"
)
// TestNow is a set time for testing; stores created by NewTestClient
// report this fixed instant instead of the wall clock.
var TestNow = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)

// TestClient wraps *bolt.Client.
type TestClient struct {
	*bolt.Client
}
// NewTestClient creates new *bolt.Client with a set time and temp path.
// The caller is responsible for calling Close, which also removes the
// temporary database file.
func NewTestClient() (*TestClient, error) {
	f, err := ioutil.TempFile("", "chronograf-bolt-")
	if err != nil {
		return nil, errors.New("unable to open temporary boltdb file")
	}
	// Only the path is needed; bolt reopens the file itself.
	f.Close()

	c := &TestClient{
		Client: bolt.NewClient(),
	}
	c.Path = f.Name()
	c.Now = func() time.Time { return TestNow }

	build := chronograf.BuildInfo{
		Version: "version",
		Commit:  "commit",
	}

	// BUG FIX: the Open error was previously ignored, handing callers a
	// broken client; surface it and clean up the temp file on failure.
	if err := c.Open(context.TODO(), mocks.NewLogger(), build); err != nil {
		os.Remove(f.Name())
		return nil, err
	}
	return c, nil
}
// Close closes the wrapped client and removes the temporary database file.
func (c *TestClient) Close() error {
	err := c.Client.Close()
	os.Remove(c.Path)
	return err
}

View File

@ -1,83 +0,0 @@
package bolt
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure BuildStore struct implements chronograf.BuildStore interface
var _ chronograf.BuildStore = &BuildStore{}

// BuildBucket is the bolt bucket used to store Chronograf build information
var BuildBucket = []byte("Build")

// BuildKey is the constant key used in the bolt bucket; the bucket only
// ever holds this single entry.
var BuildKey = []byte("build")

// BuildStore is a bolt implementation to store Chronograf build information
type BuildStore struct {
	client *Client // owning bolt client; provides the db handle
}
// Get retrieves Chronograf build information from the database inside a
// read-only transaction.
func (s *BuildStore) Get(ctx context.Context) (chronograf.BuildInfo, error) {
	var build chronograf.BuildInfo
	err := s.client.db.View(func(tx *bolt.Tx) error {
		var e error
		build, e = s.get(ctx, tx)
		return e
	})
	if err != nil {
		return chronograf.BuildInfo{}, err
	}
	return build, nil
}
// Update overwrites the current Chronograf build information in the
// database inside a write transaction.
func (s *BuildStore) Update(ctx context.Context, build chronograf.BuildInfo) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return s.update(ctx, build, tx)
	})
}
// Migrate simply stores the current version in the database; it is invoked
// from Client.migrate on every startup.
func (s *BuildStore) Migrate(ctx context.Context, build chronograf.BuildInfo) error {
	return s.Update(ctx, build)
}
// get retrieves the current build, falling back to a default when either
// the bucket or the key is missing (databases created before 1.4.0.0).
func (s *BuildStore) get(ctx context.Context, tx *bolt.Tx) (chronograf.BuildInfo, error) {
	defaultBuild := chronograf.BuildInfo{
		Version: "pre-1.4.0.0",
		Commit:  "",
	}

	bucket := tx.Bucket(BuildBucket)
	if bucket == nil {
		return defaultBuild, nil
	}
	v := bucket.Get(BuildKey)
	if v == nil {
		return defaultBuild, nil
	}

	var build chronograf.BuildInfo
	if err := internal.UnmarshalBuild(v, &build); err != nil {
		return build, err
	}
	return build, nil
}
// update serializes build and writes it under BuildKey within the caller's
// transaction.
func (s *BuildStore) update(ctx context.Context, build chronograf.BuildInfo, tx *bolt.Tx) error {
	v, err := internal.MarshalBuild(build)
	if err != nil {
		return err
	}
	return tx.Bucket(BuildBucket).Put(BuildKey, v)
}

View File

@ -1,54 +0,0 @@
package bolt_test
// import (
// "testing"
// "github.com/google/go-cmp/cmp"
// "github.com/influxdata/influxdb/chronograf"
// )
// func
// func TestBuildStore_Get(t *testing.T) {
// type wants struct {
// build *chronograf.BuildInfo
// err error
// }
// tests := []struct {
// name string
// wants wants
// }{
// {
// name: "When the build info is missing",
// wants: wants{
// build: &chronograf.BuildInfo{
// Version: "pre-1.4.0.0",
// Commit: "",
// },
// },
// },
// }
// for _, tt := range tests {
// client, err := NewTestClient()
// if err != nil {
// t.Fatal(err)
// }
// if err := client.Open(context.TODO()); err != nil {
// t.Fatal(err)
// }
// defer client.Close()
// b := client.BuildStore
// got, err := b.Get(context.Background())
// if (tt.wants.err != nil) != (err != nil) {
// t.Errorf("%q. BuildStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
// continue
// }
// if diff := cmp.Diff(got, tt.wants.build); diff != "" {
// t.Errorf("%q. BuildStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
// }
// }
// }
// func TestBuildStore_Update(t *testing.T) {
// }

File diff suppressed because it is too large Load Diff

View File

@ -1,278 +0,0 @@
package bolt
import (
"context"
"fmt"
"io"
"os"
"path"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/id"
bolt "go.etcd.io/bbolt"
)
// Client is a client for the boltDB data store.
type Client struct {
	Path   string            // filesystem path of the bolt file; ignored when WithDB injected a db
	db     *bolt.DB          // underlying bolt handle, set by Open or WithDB
	logger chronograf.Logger // set during Open; may be nil when WithDB was used
	isNew  bool              // true when Open created a fresh database file
	Now    func() time.Time  // clock source, overridable in tests

	LayoutIDs chronograf.ID

	// One store per top-level entity; all share this client's db handle.
	BuildStore              *BuildStore
	SourcesStore            *SourcesStore
	ServersStore            *ServersStore
	LayoutsStore            *LayoutsStore
	DashboardsStore         *DashboardsStore
	UsersStore              *UsersStore
	OrganizationsStore      *OrganizationsStore
	ConfigStore             *ConfigStore
	MappingsStore           *MappingsStore
	OrganizationConfigStore *OrganizationConfigStore
}
// NewClient initializes all stores, wiring each back to the client so they
// share one database handle. The database itself is not opened here.
func NewClient() *Client {
	c := &Client{Now: time.Now}
	c.BuildStore = &BuildStore{client: c}
	c.SourcesStore = &SourcesStore{client: c}
	c.ServersStore = &ServersStore{client: c}
	c.LayoutsStore = &LayoutsStore{client: c, IDs: &id.UUID{}}
	c.DashboardsStore = &DashboardsStore{client: c, IDs: &id.UUID{}}
	c.UsersStore = &UsersStore{client: c}
	c.OrganizationsStore = &OrganizationsStore{client: c}
	c.ConfigStore = &ConfigStore{client: c}
	c.MappingsStore = &MappingsStore{client: c}
	c.OrganizationConfigStore = &OrganizationConfigStore{client: c}
	return c
}
// WithDB sets the boltdb database for a client. It should not be called
// after a call to Open.
func (c *Client) WithDB(db *bolt.DB) {
	c.db = db
}

// Option changes the behavior of Open().
type Option interface {
	// Backup reports whether Open should back up the database file
	// before running migrations.
	Backup() bool
}

// WithBackup returns an Option requesting a pre-migration backup.
func WithBackup() Option {
	return Backup{}
}

// Backup tells Open to perform a backup prior to initialization.
type Backup struct{}

// Backup always returns true.
func (b Backup) Backup() bool {
	return true
}
// Open opens (or creates) the boltDB file at c.Path, then initializes
// buckets and runs migrations. When an Option requests it, the existing
// file is backed up before any migration runs. If a database was already
// injected via WithDB, the file handling and backup steps are skipped.
func (c *Client) Open(ctx context.Context, logger chronograf.Logger, build chronograf.BuildInfo, opts ...Option) error {
	if c.db == nil {
		// Record whether the file pre-exists so backup() can skip
		// fresh installs.
		if _, err := os.Stat(c.Path); os.IsNotExist(err) {
			c.isNew = true
		} else if err != nil {
			return err
		}

		// Open database file. The timeout bounds the wait on bolt's
		// exclusive file lock (e.g. another chronograf holding it).
		db, err := bolt.Open(c.Path, 0600, &bolt.Options{Timeout: 1 * time.Second})
		if err != nil {
			return fmt.Errorf("unable to open boltdb; is there a chronograf already running?  %v", err)
		}
		c.db = db
		// NOTE(review): logger is only assigned on this path; when WithDB
		// was used, c.logger stays nil — confirm callers tolerate that.
		c.logger = logger

		// Backup (when requested) must happen before initialize/migrate
		// mutate the file.
		for _, opt := range opts {
			if opt.Backup() {
				if err = c.backup(ctx, build); err != nil {
					return fmt.Errorf("unable to backup your database prior to migrations: %v", err)
				}
			}
		}
	}

	if err := c.initialize(ctx); err != nil {
		return fmt.Errorf("unable to boot boltdb: %v", err)
	}
	if err := c.migrate(ctx, build); err != nil {
		return fmt.Errorf("unable to migrate boltdb: %v", err)
	}
	return nil
}
// initialize creates any Buckets that are missing. Creation is idempotent,
// so this is safe to run on every startup.
func (c *Client) initialize(ctx context.Context) error {
	// Every bucket the stores depend on; kept in one list so adding a
	// store means adding one line here rather than a new if-block.
	buckets := [][]byte{
		SchemaVersionBucket,
		OrganizationsBucket,
		SourcesBucket,
		ServersBucket,
		LayoutsBucket,
		DashboardsBucket,
		UsersBucket,
		ConfigBucket,
		BuildBucket,
		MappingsBucket,
		OrganizationConfigBucket,
	}
	return c.db.Update(func(tx *bolt.Tx) error {
		for _, bucket := range buckets {
			if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {
				return err
			}
		}
		return nil
	})
}
// migrate moves data from an old schema to a new schema in each Store,
// then runs the versioned migrations. A nil db (client never opened) is a
// no-op.
func (c *Client) migrate(ctx context.Context, build chronograf.BuildInfo) error {
	if c.db == nil {
		return nil
	}

	// Runtime migrations, preserving the original order (organizations
	// first, since later stores backfill organization references).
	steps := []func(context.Context) error{
		c.OrganizationsStore.Migrate,
		c.SourcesStore.Migrate,
		c.ServersStore.Migrate,
		c.LayoutsStore.Migrate,
		c.DashboardsStore.Migrate,
		c.ConfigStore.Migrate,
	}
	for _, step := range steps {
		if err := step(ctx); err != nil {
			return err
		}
	}
	if err := c.BuildStore.Migrate(ctx, build); err != nil {
		return err
	}
	if err := c.MappingsStore.Migrate(ctx); err != nil {
		return err
	}
	if err := c.OrganizationConfigStore.Migrate(ctx); err != nil {
		return err
	}

	// BUG FIX: the error from MigrateAll was previously discarded,
	// silently skipping failed schema migrations.
	return MigrateAll(c)
}
// Close the connection to the bolt database; safe to call on a client that
// was never opened.
func (c *Client) Close() error {
	if c.db == nil {
		return nil
	}
	return c.db.Close()
}
// copy creates a copy of the database file in a sibling backup/ directory,
// named after the base file plus the given version suffix.
func (c *Client) copy(ctx context.Context, version string) error {
	backupDir := path.Join(path.Dir(c.Path), "backup")
	if _, err := os.Stat(backupDir); os.IsNotExist(err) {
		if err = os.Mkdir(backupDir, 0700); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	fromFile, err := os.Open(c.Path)
	if err != nil {
		return err
	}
	defer fromFile.Close()

	toName := fmt.Sprintf("%s.%s", path.Base(c.Path), version)
	toPath := path.Join(backupDir, toName)
	toFile, err := os.OpenFile(toPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}

	if _, err = io.Copy(toFile, fromFile); err != nil {
		toFile.Close()
		return err
	}
	// BUG FIX: the close error was previously swallowed by a defer; a
	// failed close can mean the backup was not fully flushed to disk.
	if err := toFile.Close(); err != nil {
		return err
	}

	c.logger.Info("Successfully created ", toPath)
	return nil
}
// backup makes a copy of the database to the backup/ directory, if necessary:
//   - If this is a fresh install, don't create a backup and store the current version
//   - If we are on the same version, don't create a backup
//   - If the version has changed, create a backup and store the current version
func (c *Client) backup(ctx context.Context, build chronograf.BuildInfo) error {
	lastBuild, err := c.BuildStore.Get(ctx)
	if err != nil {
		return err
	}
	switch {
	case lastBuild.Version == build.Version:
		// Same version: nothing to back up.
		return nil
	case c.isNew:
		// Fresh install: nothing worth backing up.
		return nil
	}

	// The database was pre-existing and the version has changed, so take
	// a backup of the old file.
	c.logger.Info("Moving from version ", lastBuild.Version)
	c.logger.Info("Moving to version ", build.Version)
	return c.copy(ctx, lastBuild.Version)
}

View File

@ -1,71 +0,0 @@
package bolt
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure ConfigStore implements chronograf.ConfigStore.
var _ chronograf.ConfigStore = &ConfigStore{}

// ConfigBucket is used to store chronograf application state
var ConfigBucket = []byte("ConfigV1")

// configID is the boltDB key where the configuration object is stored;
// the bucket only ever holds this single entry.
var configID = []byte("config/v1")

// ConfigStore uses bolt to store and retrieve global
// application configuration
type ConfigStore struct {
	client *Client // owning bolt client; provides the db handle
}
// Migrate ensures a configuration record exists, writing the default one
// when none can be read.
func (s *ConfigStore) Migrate(ctx context.Context) error {
	_, err := s.Get(ctx)
	if err == nil {
		return nil
	}
	return s.Initialize(ctx)
}

// Initialize stores the default application configuration.
func (s *ConfigStore) Initialize(ctx context.Context) error {
	cfg := chronograf.Config{
		Auth: chronograf.AuthConfig{
			SuperAdminNewUsers: false,
		},
	}
	return s.Update(ctx, &cfg)
}
// Get loads the global application configuration, returning
// chronograf.ErrConfigNotFound when no record has been stored yet.
func (s *ConfigStore) Get(ctx context.Context) (*chronograf.Config, error) {
	var cfg chronograf.Config
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(ConfigBucket).Get(configID)
		if v == nil {
			return chronograf.ErrConfigNotFound
		}
		return internal.UnmarshalConfig(v, &cfg)
	}); err != nil {
		return nil, err
	}
	return &cfg, nil
}
// Update persists the given configuration; cfg must be non-nil.
func (s *ConfigStore) Update(ctx context.Context, cfg *chronograf.Config) error {
	if cfg == nil {
		return fmt.Errorf("config provided was nil")
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		v, err := internal.MarshalConfig(cfg)
		if err != nil {
			return err
		}
		return tx.Bucket(ConfigBucket).Put(configID, v)
	})
}

View File

@ -1,105 +0,0 @@
package bolt_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestConfig_Get verifies that a fresh store returns the default
// configuration written by ConfigStore.Migrate/Initialize.
func TestConfig_Get(t *testing.T) {
	type wants struct {
		config *chronograf.Config
		err    error
	}
	tests := []struct {
		name  string
		wants wants
	}{
		{
			name: "Get config",
			wants: wants{
				config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
		},
	}
	for _, tt := range tests {
		// BUG FIX: run each case as a subtest so the deferred Close fires
		// per iteration instead of accumulating open clients/files until
		// the whole test returns (defer-in-loop).
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()

			s := client.ConfigStore
			got, err := s.Get(context.Background())
			if (tt.wants.err != nil) != (err != nil) {
				t.Errorf("%q. ConfigStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(got, tt.wants.config); diff != "" {
				t.Errorf("%q. ConfigStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestConfig_Update verifies that a stored configuration round-trips
// through Update and Get unchanged.
func TestConfig_Update(t *testing.T) {
	type args struct {
		config *chronograf.Config
	}
	type wants struct {
		config *chronograf.Config
		err    error
	}
	tests := []struct {
		name  string
		args  args
		wants wants
	}{
		{
			name: "Set config",
			args: args{
				config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
			wants: wants{
				config: &chronograf.Config{
					Auth: chronograf.AuthConfig{
						SuperAdminNewUsers: false,
					},
				},
			},
		},
	}
	for _, tt := range tests {
		// BUG FIX: run each case as a subtest so the deferred Close fires
		// per iteration (defer-in-loop).
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()

			s := client.ConfigStore
			err = s.Update(context.Background(), tt.args.config)
			if (tt.wants.err != nil) != (err != nil) {
				t.Errorf("%q. ConfigStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}

			// BUG FIX: the old code discarded the Get error with
			// `got, _ := ...` and then re-checked the stale error from
			// Update, so Get failures were never detected.
			got, err := s.Get(context.Background())
			if (tt.wants.err != nil) != (err != nil) {
				t.Errorf("%q. ConfigStore.Get() error = %v, wantErr %v", tt.name, err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(got, tt.wants.config); diff != "" {
				t.Errorf("%q. ConfigStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}

View File

@ -1,194 +0,0 @@
package bolt
import (
"context"
"strconv"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure DashboardsStore implements chronograf.DashboardsStore.
var _ chronograf.DashboardsStore = &DashboardsStore{}

// DashboardsBucket is the bolt bucket dashboards are stored in
var DashboardsBucket = []byte("Dashoard") // N.B. leave the misspelling for backwards-compat!

// DashboardsStore is the bolt implementation of storing dashboards
type DashboardsStore struct {
	client *Client       // owning bolt client; provides the db handle
	IDs    chronograf.ID // generator for new dashboard-cell ids
}
// AddIDs is a migration function that adds ID information to existing dashboards
func (d *DashboardsStore) AddIDs(ctx context.Context, boards []chronograf.Dashboard) error {
	for _, board := range boards {
		update := false
		for i, cell := range board.Cells {
			// If there is no id set, we generate one and update the dashboard.
			if cell.ID == "" {
				id, err := d.IDs.Generate()
				if err != nil {
					return err
				}
				cell.ID = id
				board.Cells[i] = cell
				update = true
			}
		}
		// Persist only the boards that actually changed.
		if !update {
			continue
		}
		if err := d.Update(ctx, board); err != nil {
			return err
		}
	}
	return nil
}
// Migrate updates the dashboards at runtime: it backfills missing cell ids
// and assigns organization-less dashboards to the default organization.
func (d *DashboardsStore) Migrate(ctx context.Context) error {
	// 1. Add UUIDs to cells without one
	boards, err := d.All(ctx)
	if err != nil {
		return err
	}
	// BUG FIX: previously `return nil` here, silently swallowing the error.
	if err := d.AddIDs(ctx, boards); err != nil {
		return err
	}

	defaultOrg, err := d.client.OrganizationsStore.DefaultOrganization(ctx)
	if err != nil {
		return err
	}

	// 2. Attach orphaned dashboards to the default organization.
	for _, board := range boards {
		if board.Organization == "" {
			board.Organization = defaultOrg.ID
			// BUG FIX: previously `return nil` on Update failure.
			if err := d.Update(ctx, board); err != nil {
				return err
			}
		}
	}
	return nil
}
// All returns all known dashboards.
func (d *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) {
	var boards []chronograf.Dashboard
	err := d.client.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(DashboardsBucket).ForEach(func(k, v []byte) error {
			var board chronograf.Dashboard
			if err := internal.UnmarshalDashboard(v, &board); err != nil {
				return err
			}
			boards = append(boards, board)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	return boards, nil
}
// Add creates a new Dashboard in the DashboardsStore, assigning it the
// next bucket sequence number as its ID and a fresh id to every cell.
func (d *DashboardsStore) Add(ctx context.Context, src chronograf.Dashboard) (chronograf.Dashboard, error) {
	if err := d.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(DashboardsBucket)
		id, _ := b.NextSequence()
		src.ID = chronograf.DashboardID(id)
		// TODO: use FormatInt
		strID := strconv.Itoa(int(id))

		// Every cell gets a freshly generated id.
		for i := range src.Cells {
			cid, err := d.IDs.Generate()
			if err != nil {
				return err
			}
			src.Cells[i].ID = cid
		}

		v, err := internal.MarshalDashboard(src)
		if err != nil {
			return err
		}
		return b.Put([]byte(strID), v)
	}); err != nil {
		return chronograf.Dashboard{}, err
	}
	return src, nil
}
// Get returns a Dashboard if the id exists.
func (d *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) {
	var board chronograf.Dashboard
	err := d.client.db.View(func(tx *bolt.Tx) error {
		key := []byte(strconv.Itoa(int(id)))
		v := tx.Bucket(DashboardsBucket).Get(key)
		if v == nil {
			return chronograf.ErrDashboardNotFound
		}
		return internal.UnmarshalDashboard(v, &board)
	})
	if err != nil {
		return chronograf.Dashboard{}, err
	}
	return board, nil
}
// Delete the dashboard from DashboardsStore.
func (d *DashboardsStore) Delete(ctx context.Context, dash chronograf.Dashboard) error {
	return d.client.db.Update(func(tx *bolt.Tx) error {
		key := []byte(strconv.Itoa(int(dash.ID)))
		return tx.Bucket(DashboardsBucket).Delete(key)
	})
}
// Update the dashboard in DashboardsStore, backfilling any cells that are
// missing an id. Fails with ErrDashboardNotFound for unknown dashboards.
func (d *DashboardsStore) Update(ctx context.Context, dash chronograf.Dashboard) error {
	return d.client.db.Update(func(tx *bolt.Tx) error {
		// Require an existing dashboard with the same ID.
		b := tx.Bucket(DashboardsBucket)
		key := []byte(strconv.Itoa(int(dash.ID)))
		if b.Get(key) == nil {
			return chronograf.ErrDashboardNotFound
		}

		for i := range dash.Cells {
			if dash.Cells[i].ID != "" {
				continue
			}
			cid, err := d.IDs.Generate()
			if err != nil {
				return err
			}
			dash.Cells[i].ID = cid
		}

		v, err := internal.MarshalDashboard(dash)
		if err != nil {
			return err
		}
		return b.Put(key, v)
	})
}

View File

@ -1,852 +0,0 @@
package internal
import (
"encoding/json"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/v2/chronograf"
)
//go:generate protoc --plugin ../../../scripts/protoc-gen-gogo --gogo_out=. internal.proto

// MarshalBuild encodes a build to binary protobuf format.
func MarshalBuild(b chronograf.BuildInfo) ([]byte, error) {
	pb := &BuildInfo{
		Version: b.Version,
		Commit:  b.Commit,
	}
	return proto.Marshal(pb)
}
// UnmarshalBuild decodes a build from binary protobuf data.
func UnmarshalBuild(data []byte, b *chronograf.BuildInfo) error {
	var pb BuildInfo
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}
	b.Version, b.Commit = pb.Version, pb.Commit
	return nil
}
// MarshalSource encodes a source to binary protobuf format.
// The mapping is a 1:1 copy of every chronograf.Source field.
func MarshalSource(s chronograf.Source) ([]byte, error) {
	return proto.Marshal(&Source{
		ID:                 int64(s.ID),
		Name:               s.Name,
		Type:               s.Type,
		Username:           s.Username,
		Password:           s.Password,
		SharedSecret:       s.SharedSecret,
		URL:                s.URL,
		MetaURL:            s.MetaURL,
		InsecureSkipVerify: s.InsecureSkipVerify,
		Default:            s.Default,
		Telegraf:           s.Telegraf,
		Organization:       s.Organization,
		Role:               s.Role,
		DefaultRP:          s.DefaultRP,
	})
}
// UnmarshalSource decodes a source from binary protobuf data.
// The mapping is the exact inverse of MarshalSource.
func UnmarshalSource(data []byte, s *chronograf.Source) error {
	var pb Source
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}

	s.ID = int(pb.ID)
	s.Name = pb.Name
	s.Type = pb.Type
	s.Username = pb.Username
	s.Password = pb.Password
	s.SharedSecret = pb.SharedSecret
	s.URL = pb.URL
	s.MetaURL = pb.MetaURL
	s.InsecureSkipVerify = pb.InsecureSkipVerify
	s.Default = pb.Default
	s.Telegraf = pb.Telegraf
	s.Organization = pb.Organization
	s.Role = pb.Role
	s.DefaultRP = pb.DefaultRP
	return nil
}
// MarshalServer encodes a server to binary protobuf format. The free-form
// Metadata map is serialized to JSON and stored as a string field.
func MarshalServer(s chronograf.Server) ([]byte, error) {
	metadata, err := json.Marshal(s.Metadata)
	if err != nil {
		return nil, err
	}
	return proto.Marshal(&Server{
		ID:                 int64(s.ID),
		SrcID:              int64(s.SrcID),
		Name:               s.Name,
		Username:           s.Username,
		Password:           s.Password,
		URL:                s.URL,
		Active:             s.Active,
		Organization:       s.Organization,
		InsecureSkipVerify: s.InsecureSkipVerify,
		Type:               s.Type,
		MetadataJSON:       string(metadata),
	})
}
// UnmarshalServer decodes a server from binary protobuf data, parsing the
// embedded JSON metadata back into a map.
func UnmarshalServer(data []byte, s *chronograf.Server) error {
	var pb Server
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}

	// Metadata is always reset so a previously populated struct does not
	// leak stale entries; an empty JSON string leaves it empty.
	s.Metadata = make(map[string]interface{})
	if len(pb.MetadataJSON) > 0 {
		if err := json.Unmarshal([]byte(pb.MetadataJSON), &s.Metadata); err != nil {
			return err
		}
	}

	s.ID = int(pb.ID)
	s.SrcID = int(pb.SrcID)
	s.Name = pb.Name
	s.Username = pb.Username
	s.Password = pb.Password
	s.URL = pb.URL
	s.Active = pb.Active
	s.Organization = pb.Organization
	s.InsecureSkipVerify = pb.InsecureSkipVerify
	s.Type = pb.Type
	return nil
}
// MarshalLayout encodes a layout to binary protobuf format, converting
// each cell, its queries, and its axes into their protobuf counterparts.
func MarshalLayout(l chronograf.Layout) ([]byte, error) {
	cells := make([]*Cell, len(l.Cells))
	for i, c := range l.Cells {
		queries := make([]*Query, len(c.Queries))
		for j, q := range c.Queries {
			// Range is always emitted (zero-valued when unset) so the
			// protobuf field is never nil on the wire.
			r := new(Range)
			if q.Range != nil {
				r.Upper, r.Lower = q.Range.Upper, q.Range.Lower
			}
			queries[j] = &Query{
				Command:  q.Command,
				DB:       q.DB,
				RP:       q.RP,
				GroupBys: q.GroupBys,
				Wheres:   q.Wheres,
				Label:    q.Label,
				Range:    r,
			}
		}

		axes := make(map[string]*Axis, len(c.Axes))
		for a, r := range c.Axes {
			axes[a] = &Axis{
				Bounds: r.Bounds,
				Label:  r.Label,
			}
		}

		cells[i] = &Cell{
			X:       c.X,
			Y:       c.Y,
			W:       c.W,
			H:       c.H,
			I:       c.I,
			Name:    c.Name,
			Queries: queries,
			Type:    c.Type,
			Axes:    axes,
		}
	}
	return proto.Marshal(&Layout{
		ID:          l.ID,
		Measurement: l.Measurement,
		Application: l.Application,
		Autoflow:    l.Autoflow,
		Cells:       cells,
	})
}
// UnmarshalLayout decodes a layout from binary protobuf data, rebuilding
// cells, queries, and axes. A Range whose bounds are equal is treated as
// "unset" and omitted from the resulting query.
func UnmarshalLayout(data []byte, l *chronograf.Layout) error {
	var pb Layout
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}
	l.ID = pb.ID
	l.Measurement = pb.Measurement
	l.Application = pb.Application
	l.Autoflow = pb.Autoflow

	cells := make([]chronograf.Cell, len(pb.Cells))
	for i, c := range pb.Cells {
		queries := make([]chronograf.Query, len(c.Queries))
		for j, q := range c.Queries {
			queries[j] = chronograf.Query{
				Command:  q.Command,
				DB:       q.DB,
				RP:       q.RP,
				GroupBys: q.GroupBys,
				Wheres:   q.Wheres,
				Label:    q.Label,
			}
			// BUG FIX: guard against a nil Range pointer (possible when
			// decoding data written before the Range field existed) to
			// avoid a nil dereference.
			if q.Range != nil && q.Range.Upper != q.Range.Lower {
				queries[j].Range = &chronograf.Range{
					Upper: q.Range.Upper,
					Lower: q.Range.Lower,
				}
			}
		}

		axes := make(map[string]chronograf.Axis, len(c.Axes))
		for a, r := range c.Axes {
			axes[a] = chronograf.Axis{
				Bounds: r.Bounds,
				Label:  r.Label,
			}
		}

		cells[i] = chronograf.Cell{
			X:       c.X,
			Y:       c.Y,
			W:       c.W,
			H:       c.H,
			I:       c.I,
			Name:    c.Name,
			Queries: queries,
			Type:    c.Type,
			Axes:    axes,
		}
	}
	l.Cells = cells
	return nil
}
// MarshalDashboard encodes a dashboard to binary protobuf format,
// converting cells (queries, colors, axes, table/field options) and
// template variables into their protobuf counterparts.
func MarshalDashboard(d chronograf.Dashboard) ([]byte, error) {
	cells := make([]*DashboardCell, len(d.Cells))
	for i, c := range d.Cells {
		queries := make([]*Query, len(c.Queries))
		for j, q := range c.Queries {
			// Range is always emitted (zero-valued when unset) so the
			// protobuf field is never nil on the wire.
			r := new(Range)
			if q.Range != nil {
				r.Upper, r.Lower = q.Range.Upper, q.Range.Lower
			}
			// Shifts are sourced from the embedded QueryConfig.
			q.Shifts = q.QueryConfig.Shifts
			queries[j] = &Query{
				Command: q.Command,
				Label:   q.Label,
				Range:   r,
				Source:  q.Source,
				Type:    q.Type,
			}

			shifts := make([]*TimeShift, len(q.Shifts))
			for k := range q.Shifts {
				shift := &TimeShift{
					Label:    q.Shifts[k].Label,
					Unit:     q.Shifts[k].Unit,
					Quantity: q.Shifts[k].Quantity,
				}

				shifts[k] = shift
			}

			queries[j].Shifts = shifts
		}

		colors := make([]*Color, len(c.CellColors))
		for j, color := range c.CellColors {
			colors[j] = &Color{
				ID:    color.ID,
				Type:  color.Type,
				Hex:   color.Hex,
				Name:  color.Name,
				Value: color.Value,
			}
		}

		axes := make(map[string]*Axis, len(c.Axes))
		for a, r := range c.Axes {
			axes[a] = &Axis{
				Bounds: r.Bounds,
				Label:  r.Label,
				Prefix: r.Prefix,
				Suffix: r.Suffix,
				Base:   r.Base,
				Scale:  r.Scale,
			}
		}

		sortBy := &RenamableField{
			InternalName: c.TableOptions.SortBy.InternalName,
			DisplayName:  c.TableOptions.SortBy.DisplayName,
			Visible:      c.TableOptions.SortBy.Visible,
		}

		tableOptions := &TableOptions{
			VerticalTimeAxis: c.TableOptions.VerticalTimeAxis,
			SortBy:           sortBy,
			Wrapping:         c.TableOptions.Wrapping,
			FixFirstColumn:   c.TableOptions.FixFirstColumn,
		}

		decimalPlaces := &DecimalPlaces{
			IsEnforced: c.DecimalPlaces.IsEnforced,
			Digits:     c.DecimalPlaces.Digits,
		}

		// NOTE(review): this loop variable shadows the outer cell index i;
		// harmless here since the outer i is not used below, but consider
		// renaming for clarity.
		fieldOptions := make([]*RenamableField, len(c.FieldOptions))
		for i, field := range c.FieldOptions {
			fieldOptions[i] = &RenamableField{
				InternalName: field.InternalName,
				DisplayName:  field.DisplayName,
				Visible:      field.Visible,
			}
		}

		cells[i] = &DashboardCell{
			ID:      c.ID,
			X:       c.X,
			Y:       c.Y,
			W:       c.W,
			H:       c.H,
			Name:    c.Name,
			Queries: queries,
			Type:    c.Type,
			Axes:    axes,
			Colors:  colors,

			TableOptions:  tableOptions,
			FieldOptions:  fieldOptions,
			TimeFormat:    c.TimeFormat,
			DecimalPlaces: decimalPlaces,
		}
	}

	templates := make([]*Template, len(d.Templates))
	for i, t := range d.Templates {
		vals := make([]*TemplateValue, len(t.Values))
		for j, v := range t.Values {
			vals[j] = &TemplateValue{
				Selected: v.Selected,
				Type:     v.Type,
				Value:    v.Value,
				Key:      v.Key,
			}
		}

		template := &Template{
			ID:      string(t.ID),
			TempVar: t.Var,
			Values:  vals,
			Type:    t.Type,
			Label:   t.Label,
		}

		// Query is optional on templates; only copy it when present.
		if t.Query != nil {
			template.Query = &TemplateQuery{
				Command:     t.Query.Command,
				Db:          t.Query.DB,
				Rp:          t.Query.RP,
				Measurement: t.Query.Measurement,
				TagKey:      t.Query.TagKey,
				FieldKey:    t.Query.FieldKey,
			}
		}
		templates[i] = template
	}
	return proto.Marshal(&Dashboard{
		ID:           int64(d.ID),
		Cells:        cells,
		Templates:    templates,
		Name:         d.Name,
		Organization: d.Organization,
	})
}
// UnmarshalDashboard decodes a dashboard from binary protobuf data,
// filling in legacy defaults (axis base/scale, decimal places, cell type)
// for fields that older encodings did not populate.
func UnmarshalDashboard(data []byte, d *chronograf.Dashboard) error {
	var pb Dashboard
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}
	cells := make([]chronograf.DashboardCell, len(pb.Cells))
	for i, c := range pb.Cells {
		queries := make([]chronograf.DashboardQuery, len(c.Queries))
		for j, q := range c.Queries {
			queries[j] = chronograf.DashboardQuery{
				Command: q.Command,
				Label:   q.Label,
				Source:  q.Source,
				Type:    q.Type,
			}
			// A degenerate range (Upper == Lower) is treated as "no range set"
			// and leaves queries[j].Range nil.
			if q.Range.Upper != q.Range.Lower {
				queries[j].Range = &chronograf.Range{
					Upper: q.Range.Upper,
					Lower: q.Range.Lower,
				}
			}
			shifts := make([]chronograf.TimeShift, len(q.Shifts))
			for k := range q.Shifts {
				shift := chronograf.TimeShift{
					Label:    q.Shifts[k].Label,
					Unit:     q.Shifts[k].Unit,
					Quantity: q.Shifts[k].Quantity,
				}
				shifts[k] = shift
			}
			queries[j].Shifts = shifts
		}
		colors := make([]chronograf.CellColor, len(c.Colors))
		for j, color := range c.Colors {
			colors[j] = chronograf.CellColor{
				ID:    color.ID,
				Type:  color.Type,
				Hex:   color.Hex,
				Name:  color.Name,
				Value: color.Value,
			}
		}
		axes := make(map[string]chronograf.Axis, len(c.Axes))
		for a, r := range c.Axes {
			// axis base defaults to 10
			if r.Base == "" {
				r.Base = "10"
			}
			// axis scale defaults to linear
			if r.Scale == "" {
				r.Scale = "linear"
			}
			if r.Bounds != nil {
				axes[a] = chronograf.Axis{
					Bounds: r.Bounds,
					Label:  r.Label,
					Prefix: r.Prefix,
					Suffix: r.Suffix,
					Base:   r.Base,
					Scale:  r.Scale,
				}
			} else {
				// Normalize nil bounds to an empty slice so callers never
				// see a nil Bounds field; label/prefix/suffix are dropped.
				axes[a] = chronograf.Axis{
					Bounds: []string{},
					Base:   r.Base,
					Scale:  r.Scale,
				}
			}
		}
		// Table options may be absent in older encodings; the zero value
		// is used when the protobuf field is nil.
		tableOptions := chronograf.TableOptions{}
		if c.TableOptions != nil {
			sortBy := chronograf.RenamableField{}
			if c.TableOptions.SortBy != nil {
				sortBy.InternalName = c.TableOptions.SortBy.InternalName
				sortBy.DisplayName = c.TableOptions.SortBy.DisplayName
				sortBy.Visible = c.TableOptions.SortBy.Visible
			}
			tableOptions.SortBy = sortBy
			tableOptions.VerticalTimeAxis = c.TableOptions.VerticalTimeAxis
			tableOptions.Wrapping = c.TableOptions.Wrapping
			tableOptions.FixFirstColumn = c.TableOptions.FixFirstColumn
		}
		fieldOptions := make([]chronograf.RenamableField, len(c.FieldOptions))
		for i, field := range c.FieldOptions {
			fieldOptions[i] = chronograf.RenamableField{}
			fieldOptions[i].InternalName = field.InternalName
			fieldOptions[i].DisplayName = field.DisplayName
			fieldOptions[i].Visible = field.Visible
		}
		decimalPlaces := chronograf.DecimalPlaces{}
		if c.DecimalPlaces != nil {
			decimalPlaces.IsEnforced = c.DecimalPlaces.IsEnforced
			decimalPlaces.Digits = c.DecimalPlaces.Digits
		} else {
			// Default for cells stored before decimal places existed:
			// enforce two digits.
			decimalPlaces.IsEnforced = true
			decimalPlaces.Digits = 2
		}
		// FIXME: this is merely for legacy cells and
		// should be removed as soon as possible
		cellType := c.Type
		if cellType == "" {
			cellType = "line"
		}
		cells[i] = chronograf.DashboardCell{
			ID:            c.ID,
			X:             c.X,
			Y:             c.Y,
			W:             c.W,
			H:             c.H,
			Name:          c.Name,
			Queries:       queries,
			Type:          cellType,
			Axes:          axes,
			CellColors:    colors,
			TableOptions:  tableOptions,
			FieldOptions:  fieldOptions,
			TimeFormat:    c.TimeFormat,
			DecimalPlaces: decimalPlaces,
		}
	}
	templates := make([]chronograf.Template, len(pb.Templates))
	for i, t := range pb.Templates {
		vals := make([]chronograf.TemplateValue, len(t.Values))
		for j, v := range t.Values {
			vals[j] = chronograf.TemplateValue{
				Selected: v.Selected,
				Type:     v.Type,
				Value:    v.Value,
				Key:      v.Key,
			}
		}
		template := chronograf.Template{
			ID: chronograf.TemplateID(t.ID),
			TemplateVar: chronograf.TemplateVar{
				Var:    t.TempVar,
				Values: vals,
			},
			Type:  t.Type,
			Label: t.Label,
		}
		// Query is optional; it is only populated for query-backed templates.
		if t.Query != nil {
			template.Query = &chronograf.TemplateQuery{
				Command:     t.Query.Command,
				DB:          t.Query.Db,
				RP:          t.Query.Rp,
				Measurement: t.Query.Measurement,
				TagKey:      t.Query.TagKey,
				FieldKey:    t.Query.FieldKey,
			}
		}
		templates[i] = template
	}
	d.ID = chronograf.DashboardID(pb.ID)
	d.Cells = cells
	d.Templates = templates
	d.Name = pb.Name
	d.Organization = pb.Organization
	return nil
}
// ScopedAlert contains the source and the kapacitor id
type ScopedAlert struct {
	chronograf.AlertRule
	SrcID  int // SrcID is the ID of the source this alert is associated with
	KapaID int // KapaID is the ID of the kapacitor this alert is associated with
}
// MarshalAlertRule encodes an alert rule to binary protobuf format.
// The embedded chronograf.AlertRule is serialized to JSON and carried
// inside the protobuf envelope.
func MarshalAlertRule(r *ScopedAlert) ([]byte, error) {
	body, err := json.Marshal(r.AlertRule)
	if err != nil {
		return nil, err
	}
	pb := AlertRule{
		ID:     r.ID,
		SrcID:  int64(r.SrcID),
		KapaID: int64(r.KapaID),
		JSON:   string(body),
	}
	return proto.Marshal(&pb)
}
// UnmarshalAlertRule decodes an alert rule from binary protobuf data.
// The JSON payload inside the envelope is decoded into the embedded
// chronograf.AlertRule.
func UnmarshalAlertRule(data []byte, r *ScopedAlert) error {
	var pb AlertRule
	if err := proto.Unmarshal(data, &pb); err != nil {
		return err
	}
	if err := json.Unmarshal([]byte(pb.JSON), &r.AlertRule); err != nil {
		return err
	}
	r.SrcID = int(pb.SrcID)
	r.KapaID = int(pb.KapaID)
	return nil
}
// MarshalUser encodes a user to binary protobuf format.
// We are ignoring the password for now.
func MarshalUser(u *chronograf.User) ([]byte, error) {
	pbRoles := make([]*Role, 0, len(u.Roles))
	for _, r := range u.Roles {
		pbRoles = append(pbRoles, &Role{
			Organization: r.Organization,
			Name:         r.Name,
		})
	}
	return MarshalUserPB(&User{
		ID:         u.ID,
		Name:       u.Name,
		Provider:   u.Provider,
		Scheme:     u.Scheme,
		Roles:      pbRoles,
		SuperAdmin: u.SuperAdmin,
	})
}
// MarshalUserPB encodes an already-converted protobuf user to binary form.
// We are ignoring the password for now.
func MarshalUserPB(u *User) ([]byte, error) {
	return proto.Marshal(u)
}
// UnmarshalUser decodes a user from binary protobuf data.
// We are ignoring the password for now.
func UnmarshalUser(data []byte, u *chronograf.User) error {
	var pb User
	if err := UnmarshalUserPB(data, &pb); err != nil {
		return err
	}
	roles := make([]chronograf.Role, len(pb.Roles))
	for i := range pb.Roles {
		roles[i] = chronograf.Role{
			Organization: pb.Roles[i].Organization,
			Name:         pb.Roles[i].Name,
		}
	}
	u.ID = pb.ID
	u.Name = pb.Name
	u.Provider = pb.Provider
	u.Scheme = pb.Scheme
	u.SuperAdmin = pb.SuperAdmin
	u.Roles = roles
	return nil
}
// UnmarshalUserPB decodes a protobuf user directly from binary data.
// We are ignoring the password for now.
func UnmarshalUserPB(data []byte, u *User) error {
	return proto.Unmarshal(data, u)
}
// MarshalRole encodes a role to binary protobuf format by converting the
// chronograf role into its protobuf representation.
func MarshalRole(r *chronograf.Role) ([]byte, error) {
	return MarshalRolePB(&Role{
		Organization: r.Organization,
		Name:         r.Name,
	})
}
// MarshalRolePB encodes an already-converted protobuf role to binary form.
func MarshalRolePB(r *Role) ([]byte, error) {
	return proto.Marshal(r)
}
// UnmarshalRole decodes a role from binary protobuf data into r.
func UnmarshalRole(data []byte, r *chronograf.Role) error {
	var pb Role
	err := UnmarshalRolePB(data, &pb)
	if err != nil {
		return err
	}
	r.Organization, r.Name = pb.Organization, pb.Name
	return nil
}
// UnmarshalRolePB decodes a protobuf role directly from binary data.
func UnmarshalRolePB(data []byte, r *Role) error {
	return proto.Unmarshal(data, r)
}
// MarshalOrganization encodes an organization to binary protobuf format.
func MarshalOrganization(o *chronograf.Organization) ([]byte, error) {
	return MarshalOrganizationPB(&Organization{
		ID:          o.ID,
		Name:        o.Name,
		DefaultRole: o.DefaultRole,
	})
}
// MarshalOrganizationPB encodes an already-converted protobuf organization
// to binary form.
func MarshalOrganizationPB(o *Organization) ([]byte, error) {
	return proto.Marshal(o)
}
// UnmarshalOrganization decodes an organization from binary protobuf data
// into o.
func UnmarshalOrganization(data []byte, o *chronograf.Organization) error {
	var pb Organization
	err := UnmarshalOrganizationPB(data, &pb)
	if err != nil {
		return err
	}
	o.ID, o.Name, o.DefaultRole = pb.ID, pb.Name, pb.DefaultRole
	return nil
}
// UnmarshalOrganizationPB decodes a protobuf organization directly from
// binary data.
func UnmarshalOrganizationPB(data []byte, o *Organization) error {
	return proto.Unmarshal(data, o)
}
// MarshalConfig encodes a config to binary protobuf format.
// Only the auth section is persisted.
func MarshalConfig(c *chronograf.Config) ([]byte, error) {
	return MarshalConfigPB(&Config{
		Auth: &AuthConfig{
			SuperAdminNewUsers: c.Auth.SuperAdminNewUsers,
		},
	})
}
// MarshalConfigPB encodes an already-converted protobuf config to binary form.
func MarshalConfigPB(c *Config) ([]byte, error) {
	return proto.Marshal(c)
}
// UnmarshalConfig decodes a config from binary protobuf data.
// It returns an error when the decoded config has no auth section,
// since callers rely on c.Auth being populated.
func UnmarshalConfig(data []byte, c *chronograf.Config) error {
	var pb Config
	if err := UnmarshalConfigPB(data, &pb); err != nil {
		return err
	}
	if pb.Auth == nil {
		return fmt.Errorf("auth config is nil")
	}
	c.Auth.SuperAdminNewUsers = pb.Auth.SuperAdminNewUsers
	return nil
}
// UnmarshalConfigPB decodes a protobuf config directly from binary data.
func UnmarshalConfigPB(data []byte, c *Config) error {
	return proto.Unmarshal(data, c)
}
// MarshalOrganizationConfig encodes an organization config to binary
// protobuf format, converting each log-viewer column and its encodings.
func MarshalOrganizationConfig(c *chronograf.OrganizationConfig) ([]byte, error) {
	pbColumns := make([]*LogViewerColumn, len(c.LogViewer.Columns))
	for i := range c.LogViewer.Columns {
		col := c.LogViewer.Columns[i]
		pbEncodings := make([]*ColumnEncoding, len(col.Encodings))
		for j := range col.Encodings {
			pbEncodings[j] = &ColumnEncoding{
				Type:  col.Encodings[j].Type,
				Value: col.Encodings[j].Value,
				Name:  col.Encodings[j].Name,
			}
		}
		pbColumns[i] = &LogViewerColumn{
			Name:      col.Name,
			Position:  col.Position,
			Encodings: pbEncodings,
		}
	}
	return MarshalOrganizationConfigPB(&OrganizationConfig{
		OrganizationID: c.OrganizationID,
		LogViewer:      &LogViewerConfig{Columns: pbColumns},
	})
}
// MarshalOrganizationConfigPB encodes an already-converted protobuf
// organization config to binary form.
func MarshalOrganizationConfigPB(c *OrganizationConfig) ([]byte, error) {
	return proto.Marshal(c)
}
// UnmarshalOrganizationConfig decodes an organization config from binary
// protobuf data. It returns an error when the decoded config has no log
// viewer section, since callers rely on it being populated.
func UnmarshalOrganizationConfig(data []byte, c *chronograf.OrganizationConfig) error {
	var pb OrganizationConfig
	if err := UnmarshalOrganizationConfigPB(data, &pb); err != nil {
		return err
	}
	if pb.LogViewer == nil {
		return fmt.Errorf("log Viewer config is nil")
	}
	c.OrganizationID = pb.OrganizationID
	// Note: "col" rather than "c" here — the parameter c must stay visible
	// for the final assignment below.
	columns := make([]chronograf.LogViewerColumn, len(pb.LogViewer.Columns))
	for i, col := range pb.LogViewer.Columns {
		encodings := make([]chronograf.ColumnEncoding, len(col.Encodings))
		for j, enc := range col.Encodings {
			encodings[j] = chronograf.ColumnEncoding{
				Type:  enc.Type,
				Value: enc.Value,
				Name:  enc.Name,
			}
		}
		columns[i] = chronograf.LogViewerColumn{
			Name:      col.Name,
			Position:  col.Position,
			Encodings: encodings,
		}
	}
	c.LogViewer.Columns = columns
	return nil
}
// UnmarshalOrganizationConfigPB decodes a protobuf organization config
// directly from binary data.
func UnmarshalOrganizationConfigPB(data []byte, c *OrganizationConfig) error {
	return proto.Unmarshal(data, c)
}
// MarshalMapping encodes a mapping to binary protobuf format.
func MarshalMapping(m *chronograf.Mapping) ([]byte, error) {
	return MarshalMappingPB(&Mapping{
		Provider:             m.Provider,
		Scheme:               m.Scheme,
		ProviderOrganization: m.ProviderOrganization,
		ID:                   m.ID,
		Organization:         m.Organization,
	})
}
// MarshalMappingPB encodes an already-converted protobuf mapping to
// binary form.
func MarshalMappingPB(m *Mapping) ([]byte, error) {
	return proto.Marshal(m)
}
// UnmarshalMapping decodes a mapping from binary protobuf data into m.
func UnmarshalMapping(data []byte, m *chronograf.Mapping) error {
	var pb Mapping
	err := UnmarshalMappingPB(data, &pb)
	if err != nil {
		return err
	}
	m.ID = pb.ID
	m.Provider = pb.Provider
	m.Scheme = pb.Scheme
	m.ProviderOrganization = pb.ProviderOrganization
	m.Organization = pb.Organization
	return nil
}
// UnmarshalMappingPB decodes a protobuf mapping directly from binary data.
func UnmarshalMappingPB(data []byte, m *Mapping) error {
	return proto.Unmarshal(data, m)
}

File diff suppressed because it is too large Load Diff

View File

@ -1,241 +0,0 @@
syntax = "proto3";
package internal;
message Source {
	int64 ID = 1; // ID is the unique ID of the source
	string Name = 2; // Name is the user-defined name for the source
	string Type = 3; // Type specifies which kinds of source (enterprise vs oss)
	string Username = 4; // Username is the username to connect to the source
	string Password = 5; // Password is the password used to connect to the source
	string URL = 6; // URL are the connections to the source
	bool Default = 7; // Flags a source as the default.
	string Telegraf = 8; // Telegraf is the db telegraf is written to. By default it is "telegraf"
	bool InsecureSkipVerify = 9; // InsecureSkipVerify accepts any certificate from the influx server
	string MetaURL = 10; // MetaURL is the connection URL for the meta node.
	string SharedSecret = 11; // SharedSecret signs the optional InfluxDB JWT Authorization
	string Organization = 12; // Organization is the organization ID that resource belongs to
	string Role = 13; // Role is the name of the minimum role that a user must possess to access the resource
	string DefaultRP = 14; // DefaultRP is the default retention policy used in database queries to this source
}
message Dashboard {
	int64 ID = 1; // ID is the unique ID of the dashboard
	string Name = 2; // Name is the user-defined name of the dashboard
	repeated DashboardCell cells = 3; // a representation of all visual data required for rendering the dashboard
	repeated Template templates = 4; // Templates replace template variables within InfluxQL
	string Organization = 5; // Organization is the organization ID that resource belongs to
}
message DashboardCell {
	int32 x = 1; // X-coordinate of Cell in the Dashboard
	int32 y = 2; // Y-coordinate of Cell in the Dashboard
	int32 w = 3; // Width of Cell in the Dashboard
	int32 h = 4; // Height of Cell in the Dashboard
	repeated Query queries = 5; // Time-series data queries for Dashboard
	string name = 6; // User-facing name for this Dashboard
	string type = 7; // Dashboard visualization type
	string ID = 8; // id is the unique id of the dashboard. MIGRATED FIELD added in 1.2.0-beta6
	map<string, Axis> axes = 9; // Axes represent the graphical viewport for a cell's visualizations
	repeated Color colors = 10; // Colors represent encoding data values to color
	TableOptions tableOptions = 12; // TableOptions for visualization of cell with type 'table'
	repeated RenamableField fieldOptions = 13; // Options for each of the fields returned in a cell
	string timeFormat = 14; // format for time
	DecimalPlaces decimalPlaces = 15; // Represents how precise the values of this field should be
}
message DecimalPlaces {
	bool isEnforced = 1; // whether decimal places should be enforced
	int32 digits = 2; // the number of digits to display after decimal point
}
message TableOptions {
	reserved 1;
	bool verticalTimeAxis = 2; // time axis should be a column not row
	RenamableField sortBy = 3; // which column should a table be sorted by
	string wrapping = 4; // option for text wrapping
	reserved 5;
	bool fixFirstColumn = 6; // first column should be fixed/frozen
}
message RenamableField {
	string internalName = 1; // name of column
	string displayName = 2; // what column is renamed to
	bool visible = 3; // Represents whether RenamableField is visible
}
message Color {
	string ID = 1; // ID is the unique id of the cell color
	string Type = 2; // Type is how the color is used. Accepted (min,max,threshold)
	string Hex = 3; // Hex is the hex number of the color
	string Name = 4; // Name is the user-facing name of the hex color
	string Value = 5; // Value is the data value mapped to this color
}
message Axis {
	repeated int64 legacyBounds = 1; // legacyBounds are an ordered 2-tuple consisting of lower and upper axis extents, respectively
	repeated string bounds = 2; // bounds are an arbitrary list of client-defined bounds.
	string label = 3; // label is a description of this axis
	string prefix = 4; // specifies the prefix for axis values
	string suffix = 5; // specifies the suffix for axis values
	string base = 6; // defines the base for axis values
	string scale = 7; // represents the magnitude of the numbers on this axis
}
message Template {
	string ID = 1; // ID is the unique ID associated with this template
	string temp_var = 2; // temp_var is the variable name replaced within InfluxQL
	repeated TemplateValue values = 3; // values are the candidate replacements for the variable
	string type = 4; // Type can be fieldKeys, tagKeys, tagValues, CSV, constant, query, measurements, databases
	string label = 5; // Label is a user-facing description of the Template
	TemplateQuery query = 6; // Query is used to generate the choices for a template
}
message TemplateValue {
	string type = 1; // Type can be tagKey, tagValue, fieldKey, csv, map, measurement, database, constant
	string value = 2; // Value is the specific value used to replace a template in an InfluxQL query
	bool selected = 3; // Selected states that this variable has been picked to use for replacement
	string key = 4; // Key is the key for a specific Value if the Template Type is map (optional)
}
message TemplateQuery {
	string command = 1; // Command is the query itself
	string db = 2; // DB the database for the query (optional)
	string rp = 3; // RP is a retention policy (optional)
	string measurement = 4; // Measurement is the optionally selected measurement for the query
	string tag_key = 5; // TagKey is the optionally selected tag key for the query
	string field_key = 6; // FieldKey is the optionally selected field key for the query
}
message Server {
	int64 ID = 1; // ID is the unique ID of the server
	string Name = 2; // Name is the user-defined name for the server
	string Username = 3; // Username is the username to connect to the server
	string Password = 4; // Password is the password used to connect to the server
	string URL = 5; // URL is the path to the server
	int64 SrcID = 6; // SrcID is the ID of the data source
	bool Active = 7; // is this the currently active server for the source
	string Organization = 8; // Organization is the organization ID that resource belongs to
	bool InsecureSkipVerify = 9; // InsecureSkipVerify accepts any certificate from the client
	string Type = 10; // Type is the kind of the server (e.g. flux)
	string MetadataJSON = 11; // JSON byte representation of the metadata
}
message Layout {
	string ID = 1; // ID is the unique ID of the layout.
	string Application = 2; // Application is the user facing name of this Layout.
	string Measurement = 3; // Measurement is the descriptive name of the time series data.
	repeated Cell Cells = 4; // Cells are the individual visualization elements.
	bool Autoflow = 5; // Autoflow indicates whether the frontend should layout the cells automatically.
}
message Cell {
	int32 x = 1; // X-coordinate of Cell in the Layout
	int32 y = 2; // Y-coordinate of Cell in the Layout
	int32 w = 3; // Width of Cell in the Layout
	int32 h = 4; // Height of Cell in the Layout
	repeated Query queries = 5; // Time-series data queries for Cell.
	string i = 6; // Unique identifier for the cell
	string name = 7; // User-facing name for this cell
	repeated int64 yranges = 8; // Limits of the y-axes
	repeated string ylabels = 9; // Labels of the y-axes
	string type = 10; // Cell visualization type
	map<string, Axis> axes = 11; // Axes represent the graphical viewport for a cell's visualizations
}
message Query {
	string Command = 1; // Command is the query itself
	string DB = 2; // DB the database for the query (optional)
	string RP = 3; // RP is a retention policy (optional)
	repeated string GroupBys = 4; // GroupBys define the groups to combine in the query
	repeated string Wheres = 5; // Wheres define the restrictions on the query
	string Label = 6; // Label is the name of the Y-Axis
	Range Range = 7; // Range is the upper and lower bound of the Y-Axis
	string Source = 8; // Source is the optional URI to the data source
	repeated TimeShift Shifts = 9; // TimeShift represents a shift to apply to an influxql query's time range
	string Type = 10; // Type is the type of the query (e.g. influxql or flux)
}
message TimeShift {
	string Label = 1; // Label user facing description
	string Unit = 2; // Unit influxql time unit representation i.e. ms, s, m, h, d
	string Quantity = 3; // Quantity number of units
}
message Range {
	int64 Upper = 1; // Upper is the upper-bound of the range
	int64 Lower = 2; // Lower is the lower-bound of the range
}
message AlertRule {
	string ID = 1; // ID is the unique ID of this alert rule
	string JSON = 2; // JSON byte representation of the alert
	int64 SrcID = 3; // SrcID is the id of the source this alert is associated with
	int64 KapaID = 4; // KapaID is the id of the kapacitor this alert is associated with
}
message User {
	uint64 ID = 1; // ID is the unique ID of this user
	string Name = 2; // Name is the user's login name
	string Provider = 3; // Provider is the provider that certifies and issues this user's authentication, e.g. GitHub
	string Scheme = 4; // Scheme is the scheme used to perform this user's authentication, e.g. OAuth2 or LDAP
	repeated Role Roles = 5; // Roles is set of roles a user has
	bool SuperAdmin = 6; // SuperAdmin is bool that specifies whether a user is a super admin
}
message Role {
	string Organization = 1; // Organization is the ID of the organization that this user has a role in
	string Name = 2; // Name is the name of the role of this user in the respective organization
}
message Mapping {
	string Provider = 1; // Provider is the provider that certifies and issues this user's authentication, e.g. GitHub
	string Scheme = 2; // Scheme is the scheme used to perform this user's authentication, e.g. OAuth2 or LDAP
	string ProviderOrganization = 3; // ProviderOrganization is the group or organizations that you are a part of in an auth provider
	string ID = 4; // ID is the unique ID for the mapping
	string Organization = 5; // Organization is the organization ID that resource belongs to
}
message Organization {
	string ID = 1; // ID is the unique ID of the organization
	string Name = 2; // Name is the organization's name
	string DefaultRole = 3; // DefaultRole is the name of the role that is the default for any users added to the organization
}
message Config {
	AuthConfig Auth = 1; // Auth is the configuration for options that are auth related
}
message AuthConfig {
	bool SuperAdminNewUsers = 1; // SuperAdminNewUsers configuration option that specifies which users will auto become super admin
}
message OrganizationConfig {
	string OrganizationID = 1; // OrganizationID is the ID of the organization this config belongs to
	LogViewerConfig LogViewer = 2; // LogViewer is the organization configuration for log viewer
}
message LogViewerConfig {
	repeated LogViewerColumn Columns = 1; // Columns is the array of columns in the log viewer
}
message LogViewerColumn {
	string Name = 1; // Name is the unique identifier of the log viewer column
	int32 Position = 2; // Position is the position of the column in the log viewer's array of columns
	repeated ColumnEncoding Encodings = 3; // Encodings is the array of encoded properties associated with a log viewer column
}
message ColumnEncoding {
	string Type = 1; // Type is the purpose of the encoding, for example: severity color
	string Value = 2; // Value is what the encoding corresponds to
	string Name = 3; // Name is the optional encoding name
}
message BuildInfo {
	string Version = 1; // Version is a descriptive git SHA identifier
	string Commit = 2; // Commit is an abbreviated SHA
}
// The following is a vim modeline, it autoconfigures vim to have the
// appropriate tabbing and whitespace management to edit this file
//
// vim: ai:ts=4:noet:sts=4

View File

@ -1,480 +0,0 @@
package internal_test
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
)
// TestMarshalSource verifies that a Source survives a marshal/unmarshal
// round trip unchanged, before and after InsecureSkipVerify is set.
func TestMarshalSource(t *testing.T) {
	v := chronograf.Source{
		ID:       12,
		Name:     "Fountain of Truth",
		Type:     "influx",
		Username: "docbrown",
		Password: "1 point twenty-one g1g@w@tts",
		URL:      "http://twin-pines.mall.io:8086",
		MetaURL:  "http://twin-pines.meta.io:8086",
		Default:  true,
		Telegraf: "telegraf",
	}
	var vv chronograf.Source
	if buf, err := internal.MarshalSource(v); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalSource(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}
	// Test if the new insecureskipverify works
	v.InsecureSkipVerify = true
	if buf, err := internal.MarshalSource(v); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalSource(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}
}
// TestMarshalSourceWithSecret verifies the round trip for a Source that
// uses a SharedSecret (JWT signing) instead of a password.
func TestMarshalSourceWithSecret(t *testing.T) {
	v := chronograf.Source{
		ID:           12,
		Name:         "Fountain of Truth",
		Type:         "influx",
		Username:     "docbrown",
		SharedSecret: "hunter2s",
		URL:          "http://twin-pines.mall.io:8086",
		MetaURL:      "http://twin-pines.meta.io:8086",
		Default:      true,
		Telegraf:     "telegraf",
	}
	var vv chronograf.Source
	if buf, err := internal.MarshalSource(v); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalSource(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}
	// Test if the new insecureskipverify works
	v.InsecureSkipVerify = true
	if buf, err := internal.MarshalSource(v); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalSource(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}
}
// TestMarshalServer verifies that a Server survives a marshal/unmarshal
// round trip unchanged.
func TestMarshalServer(t *testing.T) {
	v := chronograf.Server{
		ID:                 12,
		SrcID:              2,
		Name:               "Fountain of Truth",
		Username:           "docbrown",
		Password:           "1 point twenty-one g1g@w@tts",
		URL:                "http://oldmanpeabody.mall.io:9092",
		InsecureSkipVerify: true,
	}
	var vv chronograf.Server
	if buf, err := internal.MarshalServer(v); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalServer(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(v, vv) {
		t.Fatalf("source protobuf copy error: got %#v, expected %#v", vv, v)
	}
}
// TestMarshalLayout verifies that a Layout — including cells, axes,
// queries, wheres, and group-bys — survives a marshal/unmarshal round trip.
func TestMarshalLayout(t *testing.T) {
	layout := chronograf.Layout{
		ID:          "id",
		Measurement: "measurement",
		Application: "app",
		Cells: []chronograf.Cell{
			{
				X:    1,
				Y:    1,
				W:    4,
				H:    4,
				I:    "anotherid",
				Type: "line",
				Name: "cell1",
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						Bounds: []string{"0", "100"},
						Label:  "foo",
					},
				},
				Queries: []chronograf.Query{
					{
						Range: &chronograf.Range{
							Lower: 1,
							Upper: 2,
						},
						Label:   "y1",
						Command: "select mean(usage_user) as usage_user from cpu",
						Wheres: []string{
							`"host"="myhost"`,
						},
						GroupBys: []string{
							`"cpu"`,
						},
					},
				},
			},
		},
	}
	var vv chronograf.Layout
	if buf, err := internal.MarshalLayout(layout); err != nil {
		t.Fatal(err)
	} else if err := internal.UnmarshalLayout(buf, &vv); err != nil {
		t.Fatal(err)
	} else if !cmp.Equal(layout, vv) {
		t.Fatal("source protobuf copy error: diff:\n", cmp.Diff(layout, vv))
	}
}
// Test_MarshalDashboard verifies that a fully-populated dashboard
// (queries, axes, colors, table/field options) round-trips unchanged.
func Test_MarshalDashboard(t *testing.T) {
	dashboard := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:   "9b5367de-c552-4322-a9e8-7f384cbd235c",
				X:    0,
				Y:    0,
				W:    4,
				H:    4,
				Name: "Super awesome query",
				Queries: []chronograf.DashboardQuery{
					{
						Command: "select * from cpu",
						Label:   "CPU Utilization",
						Range: &chronograf.Range{
							Upper: int64(100),
						},
						Source: "/chronograf/v1/sources/1",
						Shifts: []chronograf.TimeShift{},
					},
				},
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						Bounds: []string{"0", "3", "1-7", "foo"},
						Label:  "foo",
						Prefix: "M",
						Suffix: "m",
						Base:   "2",
						Scale:  "roflscale",
					},
				},
				Type: "line",
				CellColors: []chronograf.CellColor{
					{
						ID:    "myid",
						Type:  "min",
						Hex:   "#234567",
						Name:  "Laser",
						Value: "0",
					},
					{
						ID:    "id2",
						Type:  "max",
						Hex:   "#876543",
						Name:  "Solitude",
						Value: "100",
					},
				},
				TableOptions: chronograf.TableOptions{},
				FieldOptions: []chronograf.RenamableField{},
				TimeFormat:   "",
			},
		},
		Templates: []chronograf.Template{},
		Name:      "Dashboard",
	}
	var actual chronograf.Dashboard
	if buf, err := internal.MarshalDashboard(dashboard); err != nil {
		t.Fatal("Error marshaling dashboard: err", err)
	} else if err := internal.UnmarshalDashboard(buf, &actual); err != nil {
		t.Fatal("Error unmarshalling dashboard: err:", err)
	} else if !cmp.Equal(dashboard, actual) {
		t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(dashboard, actual))
	}
}
// Test_MarshalDashboard_WithLegacyBounds verifies that legacy int64 axis
// bounds are dropped during the round trip and replaced by the defaults:
// empty Bounds, Base "10", Scale "linear".
func Test_MarshalDashboard_WithLegacyBounds(t *testing.T) {
	dashboard := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:   "9b5367de-c552-4322-a9e8-7f384cbd235c",
				X:    0,
				Y:    0,
				W:    4,
				H:    4,
				Name: "Super awesome query",
				Queries: []chronograf.DashboardQuery{
					{
						Command: "select * from cpu",
						Label:   "CPU Utilization",
						Range: &chronograf.Range{
							Upper: int64(100),
						},
						Shifts: []chronograf.TimeShift{},
					},
				},
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						LegacyBounds: [2]int64{0, 5},
					},
				},
				CellColors: []chronograf.CellColor{
					{
						ID:    "myid",
						Type:  "min",
						Hex:   "#234567",
						Name:  "Laser",
						Value: "0",
					},
					{
						ID:    "id2",
						Type:  "max",
						Hex:   "#876543",
						Name:  "Solitude",
						Value: "100",
					},
				},
				TableOptions: chronograf.TableOptions{},
				TimeFormat:   "MM:DD:YYYY",
				FieldOptions: []chronograf.RenamableField{},
				Type:         "line",
			},
		},
		Templates: []chronograf.Template{},
		Name:      "Dashboard",
	}
	// expected differs from the input only in the "y" axis: legacy bounds
	// are discarded and defaults are filled in.
	expected := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:   "9b5367de-c552-4322-a9e8-7f384cbd235c",
				X:    0,
				Y:    0,
				W:    4,
				H:    4,
				Name: "Super awesome query",
				Queries: []chronograf.DashboardQuery{
					{
						Command: "select * from cpu",
						Label:   "CPU Utilization",
						Range: &chronograf.Range{
							Upper: int64(100),
						},
						Shifts: []chronograf.TimeShift{},
					},
				},
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						Bounds: []string{},
						Base:   "10",
						Scale:  "linear",
					},
				},
				CellColors: []chronograf.CellColor{
					{
						ID:    "myid",
						Type:  "min",
						Hex:   "#234567",
						Name:  "Laser",
						Value: "0",
					},
					{
						ID:    "id2",
						Type:  "max",
						Hex:   "#876543",
						Name:  "Solitude",
						Value: "100",
					},
				},
				TableOptions: chronograf.TableOptions{},
				FieldOptions: []chronograf.RenamableField{},
				TimeFormat:   "MM:DD:YYYY",
				Type:         "line",
			},
		},
		Templates: []chronograf.Template{},
		Name:      "Dashboard",
	}
	var actual chronograf.Dashboard
	if buf, err := internal.MarshalDashboard(dashboard); err != nil {
		t.Fatal("Error marshaling dashboard: err", err)
	} else if err := internal.UnmarshalDashboard(buf, &actual); err != nil {
		t.Fatal("Error unmarshalling dashboard: err:", err)
	} else if !cmp.Equal(expected, actual) {
		t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual))
	}
}
// Test_MarshalDashboard_WithEmptyLegacyBounds verifies that a zero-valued
// legacy bounds array is also replaced by the defaults (empty Bounds,
// Base "10", Scale "linear") during the round trip.
func Test_MarshalDashboard_WithEmptyLegacyBounds(t *testing.T) {
	dashboard := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:   "9b5367de-c552-4322-a9e8-7f384cbd235c",
				X:    0,
				Y:    0,
				W:    4,
				H:    4,
				Name: "Super awesome query",
				Queries: []chronograf.DashboardQuery{
					{
						Command: "select * from cpu",
						Label:   "CPU Utilization",
						Range: &chronograf.Range{
							Upper: int64(100),
						},
						Shifts: []chronograf.TimeShift{},
					},
				},
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						LegacyBounds: [2]int64{},
					},
				},
				CellColors: []chronograf.CellColor{
					{
						ID:    "myid",
						Type:  "min",
						Hex:   "#234567",
						Name:  "Laser",
						Value: "0",
					},
					{
						ID:    "id2",
						Type:  "max",
						Hex:   "#876543",
						Name:  "Solitude",
						Value: "100",
					},
				},
				Type:         "line",
				TableOptions: chronograf.TableOptions{},
				FieldOptions: []chronograf.RenamableField{},
				TimeFormat:   "MM:DD:YYYY",
			},
		},
		Templates: []chronograf.Template{},
		Name:      "Dashboard",
	}
	// expected differs from the input only in the "y" axis defaults.
	expected := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:   "9b5367de-c552-4322-a9e8-7f384cbd235c",
				X:    0,
				Y:    0,
				W:    4,
				H:    4,
				Name: "Super awesome query",
				Queries: []chronograf.DashboardQuery{
					{
						Command: "select * from cpu",
						Label:   "CPU Utilization",
						Range: &chronograf.Range{
							Upper: int64(100),
						},
						Shifts: []chronograf.TimeShift{},
					},
				},
				Axes: map[string]chronograf.Axis{
					"y": chronograf.Axis{
						Bounds: []string{},
						Base:   "10",
						Scale:  "linear",
					},
				},
				CellColors: []chronograf.CellColor{
					{
						ID:    "myid",
						Type:  "min",
						Hex:   "#234567",
						Name:  "Laser",
						Value: "0",
					},
					{
						ID:    "id2",
						Type:  "max",
						Hex:   "#876543",
						Name:  "Solitude",
						Value: "100",
					},
				},
				TableOptions: chronograf.TableOptions{},
				FieldOptions: []chronograf.RenamableField{},
				TimeFormat:   "MM:DD:YYYY",
				Type:         "line",
			},
		},
		Templates: []chronograf.Template{},
		Name:      "Dashboard",
	}
	var actual chronograf.Dashboard
	if buf, err := internal.MarshalDashboard(dashboard); err != nil {
		t.Fatal("Error marshaling dashboard: err", err)
	} else if err := internal.UnmarshalDashboard(buf, &actual); err != nil {
		t.Fatal("Error unmarshalling dashboard: err:", err)
	} else if !cmp.Equal(expected, actual) {
		t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual))
	}
}
// Test_MarshalDashboard_WithEmptyCellType verifies that a dashboard whose cell
// has no explicit Type survives a Marshal/Unmarshal round trip with the
// default type "line" and non-nil empty collections filled in by the codec.
func Test_MarshalDashboard_WithEmptyCellType(t *testing.T) {
	dashboard := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID: "9b5367de-c552-4322-a9e8-7f384cbd235c",
			},
		},
	}
	// The round-tripped dashboard is expected to gain defaulted fields.
	expected := chronograf.Dashboard{
		ID: 1,
		Cells: []chronograf.DashboardCell{
			{
				ID:           "9b5367de-c552-4322-a9e8-7f384cbd235c",
				Type:         "line",
				Queries:      []chronograf.DashboardQuery{},
				Axes:         map[string]chronograf.Axis{},
				CellColors:   []chronograf.CellColor{},
				TableOptions: chronograf.TableOptions{},
				FieldOptions: []chronograf.RenamableField{},
			},
		},
		Templates: []chronograf.Template{},
	}
	var actual chronograf.Dashboard
	if buf, err := internal.MarshalDashboard(dashboard); err != nil {
		t.Fatal("Error marshaling dashboard: err", err)
	} else if err := internal.UnmarshalDashboard(buf, &actual); err != nil {
		t.Fatal("Error unmarshalling dashboard: err:", err)
	} else if !cmp.Equal(expected, actual) {
		t.Fatalf("Dashboard protobuf copy error: diff follows:\n%s", cmp.Diff(expected, actual))
	}
}

View File

@ -1,128 +0,0 @@
package bolt
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure LayoutsStore implements chronograf.LayoutsStore.
var _ chronograf.LayoutsStore = &LayoutsStore{}

// LayoutsBucket is the bolt bucket layouts are stored in.
var LayoutsBucket = []byte("Layout")

// LayoutsStore is the bolt implementation to store layouts.
type LayoutsStore struct {
	// client provides access to the shared bolt database handle.
	client *Client
	// IDs generates identifiers assigned to newly added layouts.
	IDs chronograf.ID
}
// Migrate is a no-op: the layouts bucket requires no schema migration.
func (s *LayoutsStore) Migrate(ctx context.Context) error {
	return nil
}
// All returns every layout stored in the layouts bucket.
func (s *LayoutsStore) All(ctx context.Context) ([]chronograf.Layout, error) {
	var layouts []chronograf.Layout
	err := s.client.db.View(func(tx *bolt.Tx) error {
		// Decode each stored value; the first unmarshal error aborts the scan.
		return tx.Bucket(LayoutsBucket).ForEach(func(k, v []byte) error {
			var layout chronograf.Layout
			if err := internal.UnmarshalLayout(v, &layout); err != nil {
				return err
			}
			layouts = append(layouts, layout)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	return layouts, nil
}
// Add creates a new Layout in the LayoutsStore, assigning it a generated ID.
// The stored copy (with its new ID) is returned.
func (s *LayoutsStore) Add(ctx context.Context, src chronograf.Layout) (chronograf.Layout, error) {
	err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(LayoutsBucket)
		id, err := s.IDs.Generate()
		if err != nil {
			return err
		}
		src.ID = id
		v, err := internal.MarshalLayout(src)
		if err != nil {
			return err
		}
		return b.Put([]byte(src.ID), v)
	})
	if err != nil {
		return chronograf.Layout{}, err
	}
	return src, nil
}
// Delete removes the Layout from the LayoutsStore. It returns
// chronograf.ErrLayoutNotFound when no layout with src.ID exists.
func (s *LayoutsStore) Delete(ctx context.Context, src chronograf.Layout) error {
	// Verify the layout exists before attempting removal.
	if _, err := s.Get(ctx, src.ID); err != nil {
		return err
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(LayoutsBucket).Delete([]byte(src.ID))
	})
}
// Get returns the Layout stored under id, or chronograf.ErrLayoutNotFound
// when no such key exists in the bucket.
func (s *LayoutsStore) Get(ctx context.Context, id string) (chronograf.Layout, error) {
	var layout chronograf.Layout
	err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(LayoutsBucket).Get([]byte(id))
		if v == nil {
			return chronograf.ErrLayoutNotFound
		}
		return internal.UnmarshalLayout(v, &layout)
	})
	if err != nil {
		return chronograf.Layout{}, err
	}
	return layout, nil
}
// Update overwrites the stored Layout keyed by src.ID. It returns
// chronograf.ErrLayoutNotFound when the layout does not already exist.
func (s *LayoutsStore) Update(ctx context.Context, src chronograf.Layout) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(LayoutsBucket)
		// Refuse to upsert: the key must already be present.
		if b.Get([]byte(src.ID)) == nil {
			return chronograf.ErrLayoutNotFound
		}
		v, err := internal.MarshalLayout(src)
		if err != nil {
			return err
		}
		return b.Put([]byte(src.ID), v)
	})
}

View File

@ -1,128 +0,0 @@
package bolt
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure MappingsStore implements chronograf.MappingsStore.
var _ chronograf.MappingsStore = &MappingsStore{}

var (
	// MappingsBucket is the bucket where mappings are stored.
	MappingsBucket = []byte("MappingsV1")
)

// MappingsStore uses bolt to store and retrieve Mappings.
type MappingsStore struct {
	// client provides access to the shared bolt database handle.
	client *Client
}
// Migrate is a no-op: the mappings bucket requires no schema migration.
func (s *MappingsStore) Migrate(ctx context.Context) error {
	return nil
}
// Add creates a new Mapping in the MappingsStore, assigning it the next
// bucket-sequence number as its ID, and returns the stored mapping.
func (s *MappingsStore) Add(ctx context.Context, o *chronograf.Mapping) (*chronograf.Mapping, error) {
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(MappingsBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		// The bucket sequence yields a unique, monotonically increasing ID.
		o.ID = fmt.Sprintf("%d", seq)
		v, err := internal.MarshalMapping(o)
		if err != nil {
			return err
		}
		return b.Put([]byte(o.ID), v)
	}); err != nil {
		return nil, err
	}
	return o, nil
}
// All returns every mapping stored in the bucket.
func (s *MappingsStore) All(ctx context.Context) ([]chronograf.Mapping, error) {
	var mappings []chronograf.Mapping
	if err := s.each(ctx, func(m *chronograf.Mapping) {
		mappings = append(mappings, *m)
	}); err != nil {
		return nil, err
	}
	return mappings, nil
}
// Delete removes the mapping from the MappingsStore. It returns
// chronograf.ErrMappingNotFound when no mapping with o.ID exists.
func (s *MappingsStore) Delete(ctx context.Context, o *chronograf.Mapping) error {
	// Confirm existence first so missing IDs surface as not-found errors.
	if _, err := s.get(ctx, o.ID); err != nil {
		return err
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(MappingsBucket).Delete([]byte(o.ID))
	})
}
// get loads and decodes a single mapping by id inside a read-only transaction.
func (s *MappingsStore) get(ctx context.Context, id string) (*chronograf.Mapping, error) {
	var m chronograf.Mapping
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(MappingsBucket).Get([]byte(id))
		if v == nil {
			return chronograf.ErrMappingNotFound
		}
		return internal.UnmarshalMapping(v, &m)
	}); err != nil {
		return nil, err
	}
	return &m, nil
}
// each decodes every mapping in the bucket and invokes fn with a pointer to
// each decoded value; the first unmarshal error stops the iteration.
func (s *MappingsStore) each(ctx context.Context, fn func(*chronograf.Mapping)) error {
	return s.client.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(MappingsBucket).ForEach(func(k, v []byte) error {
			var m chronograf.Mapping
			if err := internal.UnmarshalMapping(v, &m); err != nil {
				return err
			}
			fn(&m)
			return nil
		})
	})
}
// Get returns a Mapping if the id exists; otherwise it returns the
// not-found error produced by the underlying lookup.
func (s *MappingsStore) Get(ctx context.Context, id string) (*chronograf.Mapping, error) {
	return s.get(ctx, id)
}
// Update overwrites the stored mapping keyed by o.ID with o.
func (s *MappingsStore) Update(ctx context.Context, o *chronograf.Mapping) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		v, err := internal.MarshalMapping(o)
		if err != nil {
			return err
		}
		return tx.Bucket(MappingsBucket).Put([]byte(o.ID), v)
	})
}

View File

@ -1,480 +0,0 @@
package bolt_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/influxdb/v2/chronograf"
)
// mappingCmpOptions ignores the store-assigned ID and treats nil and empty
// collections as equal when diffing mappings in these tests.
var mappingCmpOptions = cmp.Options{
	cmpopts.IgnoreFields(chronograf.Mapping{}, "ID"),
	cmpopts.EquateEmpty(),
}
// TestMappingStore_Add adds a mapping to a fresh bolt store and verifies it
// can be read back by the ID the store assigned (ID itself is ignored by
// mappingCmpOptions).
func TestMappingStore_Add(t *testing.T) {
	type fields struct {
		mappings []*chronograf.Mapping
	}
	type args struct {
		mapping *chronograf.Mapping
	}
	type wants struct {
		mapping *chronograf.Mapping
		err     error
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "default with wildcards",
			args: args{
				mapping: &chronograf.Mapping{
					Organization:         "default",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
			wants: wants{
				mapping: &chronograf.Mapping{
					Organization:         "default",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
		},
		{
			name: "simple",
			args: args{
				mapping: &chronograf.Mapping{
					Organization:         "default",
					Provider:             "github",
					Scheme:               "oauth2",
					ProviderOrganization: "idk",
				},
			},
			wants: wants{
				mapping: &chronograf.Mapping{
					Organization:         "default",
					Provider:             "github",
					Scheme:               "oauth2",
					ProviderOrganization: "idk",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.MappingsStore
			ctx := context.Background()
			for _, mapping := range tt.fields.mappings {
				// YOLO database prepopulation
				_, _ = s.Add(ctx, mapping)
			}
			tt.args.mapping, err = s.Add(ctx, tt.args.mapping)
			if (err != nil) != (tt.wants.err != nil) {
				t.Errorf("MappingsStore.Add() error = %v, want error %v", err, tt.wants.err)
				return
			}
			// Round-trip: the added mapping must be retrievable by its new ID.
			got, err := s.Get(ctx, tt.args.mapping.ID)
			if err != nil {
				t.Fatalf("failed to get mapping: %v", err)
				return
			}
			if diff := cmp.Diff(got, tt.wants.mapping, mappingCmpOptions...); diff != "" {
				t.Errorf("MappingStore.Add():\n-got/+want\ndiff %s", diff)
				return
			}
		})
	}
}
// TestMappingStore_All prepopulates the store and verifies All returns both
// the added mapping and the default wildcard mapping that the test client's
// migration creates.
func TestMappingStore_All(t *testing.T) {
	type fields struct {
		mappings []*chronograf.Mapping
	}
	type wants struct {
		mappings []chronograf.Mapping
		err      error
	}
	tests := []struct {
		name   string
		fields fields
		wants  wants
	}{
		{
			name: "simple",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			wants: wants{
				mappings: []chronograf.Mapping{
					chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
					// Expected alongside the prepopulated one: the default
					// wildcard mapping present in a fresh test store.
					chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.MappingsStore
			ctx := context.Background()
			for _, mapping := range tt.fields.mappings {
				// YOLO database prepopulation
				_, _ = s.Add(ctx, mapping)
			}
			got, err := s.All(ctx)
			if (err != nil) != (tt.wants.err != nil) {
				t.Errorf("MappingsStore.All() error = %v, want error %v", err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(got, tt.wants.mappings, mappingCmpOptions...); diff != "" {
				t.Errorf("MappingStore.All():\n-got/+want\ndiff %s", diff)
				return
			}
		})
	}
}
// TestMappingStore_Delete covers deleting an existing mapping (IDs assigned by
// the store start at the bucket sequence, so "1" exists) and deleting a
// nonexistent ID, which must return chronograf.ErrMappingNotFound.
func TestMappingStore_Delete(t *testing.T) {
	type fields struct {
		mappings []*chronograf.Mapping
	}
	type args struct {
		mapping *chronograf.Mapping
	}
	type wants struct {
		err error
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "simple",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			args: args{
				mapping: &chronograf.Mapping{
					ID:                   "1",
					Organization:         "default",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
			wants: wants{
				err: nil,
			},
		},
		{
			name: "mapping not found",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			args: args{
				mapping: &chronograf.Mapping{
					// "0" is never assigned by the sequence-based ID scheme.
					ID:                   "0",
					Organization:         "default",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
			},
			wants: wants{
				err: chronograf.ErrMappingNotFound,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.MappingsStore
			ctx := context.Background()
			for _, mapping := range tt.fields.mappings {
				// YOLO database prepopulation
				_, _ = s.Add(ctx, mapping)
			}
			err = s.Delete(ctx, tt.args.mapping)
			if (err != nil) != (tt.wants.err != nil) {
				t.Errorf("MappingsStore.Delete() error = %v, want error %v", err, tt.wants.err)
				return
			}
		})
	}
}
// TestMappingStore_Get covers fetching a mapping by a store-assigned ID and
// the not-found path for an ID the sequence never assigns.
func TestMappingStore_Get(t *testing.T) {
	type fields struct {
		mappings []*chronograf.Mapping
	}
	type args struct {
		mappingID string
	}
	type wants struct {
		mapping *chronograf.Mapping
		err     error
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "simple",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			args: args{
				mappingID: "1",
			},
			wants: wants{
				mapping: &chronograf.Mapping{
					ID:                   "1",
					Organization:         "default",
					Provider:             "*",
					Scheme:               "*",
					ProviderOrganization: "*",
				},
				err: nil,
			},
		},
		{
			name: "mapping not found",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			args: args{
				mappingID: "0",
			},
			wants: wants{
				err: chronograf.ErrMappingNotFound,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.MappingsStore
			ctx := context.Background()
			for _, mapping := range tt.fields.mappings {
				// YOLO database prepopulation
				_, _ = s.Add(ctx, mapping)
			}
			got, err := s.Get(ctx, tt.args.mappingID)
			if (err != nil) != (tt.wants.err != nil) {
				t.Errorf("MappingsStore.Get() error = %v, want error %v", err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(got, tt.wants.mapping, mappingCmpOptions...); diff != "" {
				t.Errorf("MappingStore.Get():\n-got/+want\ndiff %s", diff)
				return
			}
		})
	}
}
// TestMappingStore_Update overwrites an existing mapping and verifies the
// argument (mutated in place by the store) matches the expected value.
func TestMappingStore_Update(t *testing.T) {
	type fields struct {
		mappings []*chronograf.Mapping
	}
	type args struct {
		mapping *chronograf.Mapping
	}
	type wants struct {
		mapping *chronograf.Mapping
		err     error
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		wants  wants
	}{
		{
			name: "simple",
			fields: fields{
				mappings: []*chronograf.Mapping{
					&chronograf.Mapping{
						Organization:         "default",
						Provider:             "*",
						Scheme:               "*",
						ProviderOrganization: "*",
					},
					&chronograf.Mapping{
						Organization:         "0",
						Provider:             "google",
						Scheme:               "ldap",
						ProviderOrganization: "*",
					},
				},
			},
			args: args{
				mapping: &chronograf.Mapping{
					ID:                   "1",
					Organization:         "default",
					Provider:             "cool",
					Scheme:               "it",
					ProviderOrganization: "works",
				},
			},
			wants: wants{
				mapping: &chronograf.Mapping{
					ID:                   "1",
					Organization:         "default",
					Provider:             "cool",
					Scheme:               "it",
					ProviderOrganization: "works",
				},
				err: nil,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.MappingsStore
			ctx := context.Background()
			for _, mapping := range tt.fields.mappings {
				// YOLO database prepopulation
				_, _ = s.Add(ctx, mapping)
			}
			err = s.Update(ctx, tt.args.mapping)
			if (err != nil) != (tt.wants.err != nil) {
				t.Errorf("MappingsStore.Update() error = %v, want error %v", err, tt.wants.err)
				return
			}
			if diff := cmp.Diff(tt.args.mapping, tt.wants.mapping, mappingCmpOptions...); diff != "" {
				t.Errorf("MappingStore.Update():\n-got/+want\ndiff %s", diff)
				return
			}
		})
	}
}

View File

@ -1,236 +0,0 @@
package bolt
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure OrganizationConfigStore implements chronograf.OrganizationConfigStore.
var _ chronograf.OrganizationConfigStore = &OrganizationConfigStore{}

// OrganizationConfigBucket is used to store chronograf organization configurations,
// keyed by organization ID.
var OrganizationConfigBucket = []byte("OrganizationConfigV1")

// OrganizationConfigStore uses bolt to store and retrieve organization configurations.
type OrganizationConfigStore struct {
	// client provides access to the shared bolt database handle.
	client *Client
}
// Migrate is a no-op: the organization-config bucket requires no schema migration.
func (s *OrganizationConfigStore) Migrate(ctx context.Context) error {
	return nil
}
// Get retrieves the OrganizationConfig stored for orgID, or the not-found
// error when no configuration exists for that organization.
func (s *OrganizationConfigStore) Get(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) {
	var c chronograf.OrganizationConfig
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		return s.get(ctx, tx, orgID, &c)
	}); err != nil {
		return nil, err
	}
	return &c, nil
}
// get decodes the stored config for orgID into c within an existing
// transaction, returning ErrOrganizationConfigNotFound on a missing/empty key.
func (s *OrganizationConfigStore) get(ctx context.Context, tx *bolt.Tx, orgID string, c *chronograf.OrganizationConfig) error {
	if v := tx.Bucket(OrganizationConfigBucket).Get([]byte(orgID)); len(v) > 0 {
		return internal.UnmarshalOrganizationConfig(v, c)
	}
	return chronograf.ErrOrganizationConfigNotFound
}
// FindOrCreate gets an OrganizationConfig from the store or creates one if
// none exists for this organization. Both the lookup and the conditional
// insert run inside a single writable transaction, so the find-or-create is
// atomic with respect to other updates.
func (s *OrganizationConfigStore) FindOrCreate(ctx context.Context, orgID string) (*chronograf.OrganizationConfig, error) {
	var c chronograf.OrganizationConfig
	err := s.client.db.Update(func(tx *bolt.Tx) error {
		err := s.get(ctx, tx, orgID, &c)
		if err == chronograf.ErrOrganizationConfigNotFound {
			// Missing config: persist the default one for this org.
			c = newOrganizationConfig(orgID)
			return s.put(ctx, tx, &c)
		}
		return err
	})
	if err != nil {
		return nil, err
	}
	return &c, nil
}
// Put replaces the OrganizationConfig in the store, keyed by its
// OrganizationID, inside a writable transaction.
func (s *OrganizationConfigStore) Put(ctx context.Context, c *chronograf.OrganizationConfig) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return s.put(ctx, tx, c)
	})
}
// put marshals c and writes it under its OrganizationID within an existing
// transaction. A nil config is rejected with an error.
func (s *OrganizationConfigStore) put(ctx context.Context, tx *bolt.Tx, c *chronograf.OrganizationConfig) error {
	if c == nil {
		return fmt.Errorf("config provided was nil")
	}
	v, err := internal.MarshalOrganizationConfig(c)
	if err != nil {
		return err
	}
	return tx.Bucket(OrganizationConfigBucket).Put([]byte(c.OrganizationID), v)
}
// newOrganizationConfig builds the default configuration for a new
// organization: a log-viewer layout listing each column in display order,
// with per-column visibility, label, color, and display-name encodings.
func newOrganizationConfig(orgID string) chronograf.OrganizationConfig {
	return chronograf.OrganizationConfig{
		OrganizationID: orgID,
		LogViewer: chronograf.LogViewerConfig{
			Columns: []chronograf.LogViewerColumn{
				{
					// "time" is hidden by default.
					Name:     "time",
					Position: 0,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "hidden",
						},
					},
				},
				{
					// "severity" carries a color per syslog severity level.
					Name:     "severity",
					Position: 1,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
						{
							Type:  "label",
							Value: "icon",
						},
						{
							Type:  "label",
							Value: "text",
						},
						{
							Type:  "color",
							Name:  "emerg",
							Value: "ruby",
						},
						{
							Type:  "color",
							Name:  "alert",
							Value: "fire",
						},
						{
							Type:  "color",
							Name:  "crit",
							Value: "curacao",
						},
						{
							Type:  "color",
							Name:  "err",
							Value: "tiger",
						},
						{
							Type:  "color",
							Name:  "warning",
							Value: "pineapple",
						},
						{
							Type:  "color",
							Name:  "notice",
							Value: "rainforest",
						},
						{
							Type:  "color",
							Name:  "info",
							Value: "star",
						},
						{
							Type:  "color",
							Name:  "debug",
							Value: "wolf",
						},
					},
				},
				{
					Name:     "timestamp",
					Position: 2,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
					},
				},
				{
					Name:     "message",
					Position: 3,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
					},
				},
				{
					Name:     "facility",
					Position: 4,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
					},
				},
				{
					// "procid" is shown with a friendlier display name.
					Name:     "procid",
					Position: 5,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
						{
							Type:  "displayName",
							Value: "Proc ID",
						},
					},
				},
				{
					Name:     "appname",
					Position: 6,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
						{
							Type:  "displayName",
							Value: "Application",
						},
					},
				},
				{
					Name:     "host",
					Position: 7,
					Encodings: []chronograf.ColumnEncoding{
						{
							Type:  "visibility",
							Value: "visible",
						},
					},
				},
			},
		},
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,304 +0,0 @@
package bolt
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
"github.com/influxdata/influxdb/v2/chronograf/organizations"
bolt "go.etcd.io/bbolt"
)
// Ensure OrganizationsStore implements chronograf.OrganizationsStore.
var _ chronograf.OrganizationsStore = &OrganizationsStore{}

var (
	// OrganizationsBucket is the bucket where organizations are stored.
	OrganizationsBucket = []byte("OrganizationsV1")
	// DefaultOrganizationID is the ID of the default organization.
	DefaultOrganizationID = []byte("default")
)

const (
	// DefaultOrganizationName is the Name of the default organization
	DefaultOrganizationName string = "Default"
	// DefaultOrganizationRole is the DefaultRole for the Default organization
	DefaultOrganizationRole string = "member"
)

// OrganizationsStore uses bolt to store and retrieve Organizations
type OrganizationsStore struct {
	// client provides access to the shared bolt database and sibling stores.
	client *Client
}
// Migrate sets the default organization at runtime by delegating to
// CreateDefault.
func (s *OrganizationsStore) Migrate(ctx context.Context) error {
	return s.CreateDefault(ctx)
}
// CreateDefault does a findOrCreate on the default organization and its
// wildcard mapping, both keyed by DefaultOrganizationID, inside a single
// writable transaction.
//
// NOTE(review): if the default organization already exists, this returns
// early without checking that the default mapping exists — presumably both
// are always created together; confirm before relying on it.
func (s *OrganizationsStore) CreateDefault(ctx context.Context) error {
	o := chronograf.Organization{
		ID:          string(DefaultOrganizationID),
		Name:        DefaultOrganizationName,
		DefaultRole: DefaultOrganizationRole,
	}

	m := chronograf.Mapping{
		ID:                   string(DefaultOrganizationID),
		Organization:         string(DefaultOrganizationID),
		Provider:             chronograf.MappingWildcard,
		Scheme:               chronograf.MappingWildcard,
		ProviderOrganization: chronograf.MappingWildcard,
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(OrganizationsBucket)
		v := b.Get(DefaultOrganizationID)
		if v != nil {
			// Default organization already present; nothing to do.
			return nil
		}
		if v, err := internal.MarshalOrganization(&o); err != nil {
			return err
		} else if err := b.Put(DefaultOrganizationID, v); err != nil {
			return err
		}

		b = tx.Bucket(MappingsBucket)
		v = b.Get(DefaultOrganizationID)
		if v != nil {
			// Default mapping already present; nothing to do.
			return nil
		}
		if v, err := internal.MarshalMapping(&m); err != nil {
			return err
		} else if err := b.Put(DefaultOrganizationID, v); err != nil {
			return err
		}
		return nil
	})
}
// nameIsUnique reports whether no stored organization already uses name.
func (s *OrganizationsStore) nameIsUnique(ctx context.Context, name string) bool {
	_, err := s.Get(ctx, chronograf.OrganizationQuery{Name: &name})
	// Only a definitive not-found means the name is free; a successful
	// lookup or any other error is treated as "not unique".
	return err == chronograf.ErrOrganizationNotFound
}
// DefaultOrganization returns the default organization stored under
// DefaultOrganizationID.
func (s *OrganizationsStore) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) {
	var org chronograf.Organization
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(OrganizationsBucket).Get(DefaultOrganizationID)
		return internal.UnmarshalOrganization(v, &org)
	}); err != nil {
		return nil, err
	}

	return &org, nil
}
// Add creates a new Organization in the OrganizationsStore, rejecting
// duplicate names and assigning the next bucket-sequence number as the ID.
func (s *OrganizationsStore) Add(ctx context.Context, o *chronograf.Organization) (*chronograf.Organization, error) {
	if !s.nameIsUnique(ctx, o.Name) {
		return nil, chronograf.ErrOrganizationAlreadyExists
	}
	err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(OrganizationsBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		// The bucket sequence yields a unique, monotonically increasing ID.
		o.ID = fmt.Sprintf("%d", seq)
		if v, err := internal.MarshalOrganization(o); err != nil {
			return err
		} else if err := b.Put([]byte(o.ID), v); err != nil {
			return err
		}
		return nil
	})

	return o, err
}
// All returns every organization stored in the bucket.
func (s *OrganizationsStore) All(ctx context.Context) ([]chronograf.Organization, error) {
	var orgs []chronograf.Organization
	if err := s.each(ctx, func(o *chronograf.Organization) {
		orgs = append(orgs, *o)
	}); err != nil {
		return nil, err
	}
	return orgs, nil
}
// Delete removes the organization from OrganizationsStore and then cascades
// the deletion to all resources owned by it: sources, servers, dashboards,
// users, and any mappings pointing at the organization. The default
// organization cannot be deleted.
func (s *OrganizationsStore) Delete(ctx context.Context, o *chronograf.Organization) error {
	if o.ID == string(DefaultOrganizationID) {
		return chronograf.ErrCannotDeleteDefaultOrganization
	}
	// Ensure the organization exists before removing anything.
	_, err := s.get(ctx, o.ID)
	if err != nil {
		return err
	}
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(OrganizationsBucket).Delete([]byte(o.ID))
	}); err != nil {
		return err
	}

	// Dependent Delete of all resources

	// Each of the associated organization stores expects organization to be
	// set on the context.
	ctx = context.WithValue(ctx, organizations.ContextKey, o.ID)

	sourcesStore := organizations.NewSourcesStore(s.client.SourcesStore, o.ID)
	sources, err := sourcesStore.All(ctx)
	if err != nil {
		return err
	}
	for _, source := range sources {
		if err := sourcesStore.Delete(ctx, source); err != nil {
			return err
		}
	}

	serversStore := organizations.NewServersStore(s.client.ServersStore, o.ID)
	servers, err := serversStore.All(ctx)
	if err != nil {
		return err
	}
	for _, server := range servers {
		if err := serversStore.Delete(ctx, server); err != nil {
			return err
		}
	}

	dashboardsStore := organizations.NewDashboardsStore(s.client.DashboardsStore, o.ID)
	dashboards, err := dashboardsStore.All(ctx)
	if err != nil {
		return err
	}
	for _, dashboard := range dashboards {
		if err := dashboardsStore.Delete(ctx, dashboard); err != nil {
			return err
		}
	}

	usersStore := organizations.NewUsersStore(s.client.UsersStore, o.ID)
	users, err := usersStore.All(ctx)
	if err != nil {
		return err
	}
	for _, user := range users {
		if err := usersStore.Delete(ctx, &user); err != nil {
			return err
		}
	}

	// Mappings are scanned globally; only those targeting this org are removed.
	mappings, err := s.client.MappingsStore.All(ctx)
	if err != nil {
		return err
	}
	for _, mapping := range mappings {
		if mapping.Organization == o.ID {
			if err := s.client.MappingsStore.Delete(ctx, &mapping); err != nil {
				return err
			}
		}
	}

	return nil
}
// get loads and decodes a single organization by id inside a read-only
// transaction, returning ErrOrganizationNotFound for a missing key.
func (s *OrganizationsStore) get(ctx context.Context, id string) (*chronograf.Organization, error) {
	var o chronograf.Organization
	err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(OrganizationsBucket).Get([]byte(id))
		if v == nil {
			return chronograf.ErrOrganizationNotFound
		}
		return internal.UnmarshalOrganization(v, &o)
	})

	if err != nil {
		return nil, err
	}

	return &o, nil
}
// each decodes every organization in the bucket and invokes fn with a pointer
// to each decoded value; the first unmarshal error stops the iteration.
func (s *OrganizationsStore) each(ctx context.Context, fn func(*chronograf.Organization)) error {
	return s.client.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(OrganizationsBucket).ForEach(func(k, v []byte) error {
			var org chronograf.Organization
			if err := internal.UnmarshalOrganization(v, &org); err != nil {
				return err
			}
			fn(&org)
			return nil
		})
	})
}
// Get returns a Organization if the id exists.
// If an ID is provided in the query, the lookup time for an organization will be O(1).
// If Name is provided, the lookup time will be O(n).
// Get expects that only one of ID or Name will be specified, but will prefer ID over Name if both are specified.
func (s *OrganizationsStore) Get(ctx context.Context, q chronograf.OrganizationQuery) (*chronograf.Organization, error) {
	if q.ID != nil {
		return s.get(ctx, *q.ID)
	}

	if q.Name != nil {
		var org *chronograf.Organization
		err := s.each(ctx, func(o *chronograf.Organization) {
			// Keep the first organization whose name matches; later
			// matches are ignored.
			if org != nil {
				return
			}

			if o.Name == *q.Name {
				org = o
			}
		})

		if err != nil {
			return nil, err
		}

		if org == nil {
			return nil, chronograf.ErrOrganizationNotFound
		}

		return org, nil
	}
	return nil, fmt.Errorf("must specify either ID, or Name in OrganizationQuery")
}
// Update overwrites the stored organization keyed by o.ID, rejecting renames
// that would collide with another organization's name.
func (s *OrganizationsStore) Update(ctx context.Context, o *chronograf.Organization) error {
	org, err := s.get(ctx, o.ID)
	if err != nil {
		return err
	}

	// A rename must not collide with an existing organization name.
	if o.Name != org.Name && !s.nameIsUnique(ctx, o.Name) {
		return chronograf.ErrOrganizationAlreadyExists
	}

	return s.client.db.Update(func(tx *bolt.Tx) error {
		v, err := internal.MarshalOrganization(o)
		if err != nil {
			return err
		}
		return tx.Bucket(OrganizationsBucket).Put([]byte(o.ID), v)
	})
}

View File

@ -1,659 +0,0 @@
package bolt_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt"
"github.com/influxdata/influxdb/v2/chronograf/roles"
)
// orgCmpOptions ignores the store-assigned ID and treats nil and empty
// collections as equal when diffing organizations in these tests.
var orgCmpOptions = cmp.Options{
	cmpopts.IgnoreFields(chronograf.Organization{}, "ID"),
	cmpopts.EquateEmpty(),
}
// TestOrganizationsStore_GetWithName looks organizations up by name: a
// missing name must error, and a previously added organization must be
// returned intact.
func TestOrganizationsStore_GetWithName(t *testing.T) {
	type args struct {
		ctx context.Context
		org *chronograf.Organization
	}
	tests := []struct {
		name     string
		args     args
		want     *chronograf.Organization
		wantErr  bool
		addFirst bool
	}{
		{
			name: "Organization not found",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{},
			},
			wantErr: true,
		},
		{
			name: "Get Organization",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					Name: "EE - Evil Empire",
				},
			},
			want: &chronograf.Organization{
				Name: "EE - Evil Empire",
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.OrganizationsStore
			if tt.addFirst {
				tt.args.org, err = s.Add(tt.args.ctx, tt.args.org)
				if err != nil {
					t.Fatal(err)
				}
			}
			got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.org.Name})
			if (err != nil) != tt.wantErr {
				t.Errorf("%q. OrganizationsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			}
			if tt.wantErr {
				return
			}
			if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
				t.Errorf("%q. OrganizationsStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestOrganizationsStore_GetWithID looks organizations up by ID: an unknown
// ID must error, and an added organization must be retrievable via the ID
// the store assigned to it.
func TestOrganizationsStore_GetWithID(t *testing.T) {
	type args struct {
		ctx context.Context
		org *chronograf.Organization
	}
	tests := []struct {
		name     string
		args     args
		want     *chronograf.Organization
		wantErr  bool
		addFirst bool
	}{
		{
			name: "Organization not found",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					ID: "1234",
				},
			},
			wantErr: true,
		},
		{
			name: "Get Organization",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					Name: "EE - Evil Empire",
				},
			},
			want: &chronograf.Organization{
				Name: "EE - Evil Empire",
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.OrganizationsStore
			if tt.addFirst {
				// Add mutates tt.args.org, filling in the assigned ID used below.
				tt.args.org, err = s.Add(tt.args.ctx, tt.args.org)
				if err != nil {
					t.Fatal(err)
				}
			}
			got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{ID: &tt.args.org.ID})
			if (err != nil) != tt.wantErr {
				t.Errorf("%q. OrganizationsStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
				return
			}
			if tt.wantErr {
				return
			}
			if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
				t.Errorf("%q. OrganizationsStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestOrganizationsStore_All adds organizations and verifies All returns
// them plus the migrated default organization.
func TestOrganizationsStore_All(t *testing.T) {
	type args struct {
		ctx  context.Context
		orgs []chronograf.Organization
	}
	tests := []struct {
		name     string
		args     args
		want     []chronograf.Organization
		addFirst bool
	}{
		{
			name: "Get Organizations",
			args: args{
				ctx: context.Background(),
				orgs: []chronograf.Organization{
					{
						Name:        "EE - Evil Empire",
						DefaultRole: roles.MemberRoleName,
					},
					{
						Name:        "The Good Place",
						DefaultRole: roles.EditorRoleName,
					},
				},
			},
			want: []chronograf.Organization{
				{
					Name:        "EE - Evil Empire",
					DefaultRole: roles.MemberRoleName,
				},
				{
					Name:        "The Good Place",
					DefaultRole: roles.EditorRoleName,
				},
				// The default organization created by the store migration is
				// always expected in the results.
				{
					Name:        bolt.DefaultOrganizationName,
					DefaultRole: bolt.DefaultOrganizationRole,
				},
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client, err := NewTestClient()
			if err != nil {
				t.Fatal(err)
			}
			defer client.Close()
			s := client.OrganizationsStore
			if tt.addFirst {
				for _, org := range tt.args.orgs {
					_, err = s.Add(tt.args.ctx, &org)
					if err != nil {
						t.Fatal(err)
					}
				}
			}
			got, err := s.All(tt.args.ctx)
			if err != nil {
				t.Fatal(err)
				return
			}
			if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
				t.Errorf("%q. OrganizationsStore.All():\n-got/+want\ndiff %s", tt.name, diff)
			}
		})
	}
}
// TestOrganizationsStore_Update verifies OrganizationsStore.Update: renaming,
// changing the default role, both at once, the not-found case, and the
// name-collision case.
func TestOrganizationsStore_Update(t *testing.T) {
	type fields struct {
		orgs []chronograf.Organization
	}
	type args struct {
		ctx     context.Context
		initial *chronograf.Organization // organization as it exists before the update
		updates *chronograf.Organization // non-zero fields here are applied to initial
	}
	tests := []struct {
		name     string
		fields   fields
		args     args
		addFirst bool // when true, persist args.initial before updating it
		want     *chronograf.Organization
		wantErr  bool
	}{
		{
			name:   "No such organization",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					ID:   "1234",
					Name: "The Okay Place",
				},
				updates: &chronograf.Organization{},
			},
			wantErr: true,
		},
		{
			name:   "Update organization name",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name: "The Good Place",
				},
				updates: &chronograf.Organization{
					Name: "The Bad Place",
				},
			},
			want: &chronograf.Organization{
				Name: "The Bad Place",
			},
			addFirst: true,
		},
		{
			name:   "Update organization default role",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name: "The Good Place",
				},
				updates: &chronograf.Organization{
					DefaultRole: roles.ViewerRoleName,
				},
			},
			want: &chronograf.Organization{
				Name:        "The Good Place",
				DefaultRole: roles.ViewerRoleName,
			},
			addFirst: true,
		},
		{
			name:   "Update organization name and default role",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name:        "The Good Place",
					DefaultRole: roles.AdminRoleName,
				},
				updates: &chronograf.Organization{
					Name:        "The Bad Place",
					DefaultRole: roles.ViewerRoleName,
				},
			},
			want: &chronograf.Organization{
				Name:        "The Bad Place",
				DefaultRole: roles.ViewerRoleName,
			},
			addFirst: true,
		},
		{
			name:   "Update organization name, role",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name:        "The Good Place",
					DefaultRole: roles.ViewerRoleName,
				},
				updates: &chronograf.Organization{
					Name:        "The Bad Place",
					DefaultRole: roles.AdminRoleName,
				},
			},
			want: &chronograf.Organization{
				Name:        "The Bad Place",
				DefaultRole: roles.AdminRoleName,
			},
			addFirst: true,
		},
		{
			name:   "Update organization name",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name:        "The Good Place",
					DefaultRole: roles.EditorRoleName,
				},
				updates: &chronograf.Organization{
					Name: "The Bad Place",
				},
			},
			want: &chronograf.Organization{
				Name:        "The Bad Place",
				DefaultRole: roles.EditorRoleName,
			},
			addFirst: true,
		},
		{
			name:   "Update organization name",
			fields: fields{},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name: "The Good Place",
				},
				updates: &chronograf.Organization{
					Name: "The Bad Place",
				},
			},
			want: &chronograf.Organization{
				Name: "The Bad Place",
			},
			addFirst: true,
		},
		{
			name: "Update organization name - name already taken",
			fields: fields{
				orgs: []chronograf.Organization{
					{
						Name: "The Bad Place",
					},
				},
			},
			args: args{
				ctx: context.Background(),
				initial: &chronograf.Organization{
					Name: "The Good Place",
				},
				updates: &chronograf.Organization{
					Name: "The Bad Place",
				},
			},
			wantErr:  true,
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): this defer is inside the loop, so every test client
		// stays open until the whole function returns — consider closing
		// per-iteration (e.g. via t.Run) if the table grows.
		defer client.Close()

		s := client.OrganizationsStore
		// Seed any pre-existing organizations used for collision checks.
		for _, org := range tt.fields.orgs {
			_, err = s.Add(tt.args.ctx, &org)
			if err != nil {
				t.Fatal(err)
			}
		}
		if tt.addFirst {
			tt.args.initial, err = s.Add(tt.args.ctx, tt.args.initial)
			if err != nil {
				t.Fatal(err)
			}
		}

		// Apply the non-zero update fields to the persisted organization
		// in place, then push the merged value through Update.
		if tt.args.updates.Name != "" {
			tt.args.initial.Name = tt.args.updates.Name
		}
		if tt.args.updates.DefaultRole != "" {
			tt.args.initial.DefaultRole = tt.args.updates.DefaultRole
		}

		if err := s.Update(tt.args.ctx, tt.args.initial); (err != nil) != tt.wantErr {
			t.Errorf("%q. OrganizationsStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}

		// for the empty test
		if tt.want == nil {
			continue
		}

		got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.initial.Name})
		if err != nil {
			t.Fatalf("failed to get organization: %v", err)
		}
		if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
			t.Errorf("%q. OrganizationsStore.Update():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestOrganizationStore_Delete verifies that deleting a persisted
// organization succeeds and that deleting an unknown ID fails.
func TestOrganizationStore_Delete(t *testing.T) {
	type args struct {
		ctx context.Context
		org *chronograf.Organization
	}
	tests := []struct {
		name     string
		args     args
		addFirst bool // when true, persist args.org before deleting it
		wantErr  bool
	}{
		{
			name: "No such organization",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					ID: "10",
				},
			},
			wantErr: true,
		},
		{
			name: "Delete new organization",
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					Name: "The Deleted Place",
				},
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): defer inside the loop — clients stay open until the
		// function returns.
		defer client.Close()

		s := client.OrganizationsStore
		if tt.addFirst {
			// Add error intentionally ignored; the Delete assertion below
			// would surface a missing organization as a failure anyway.
			tt.args.org, _ = s.Add(tt.args.ctx, tt.args.org)
		}
		if err := s.Delete(tt.args.ctx, tt.args.org); (err != nil) != tt.wantErr {
			t.Errorf("%q. OrganizationsStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}
	}
}
// TestOrganizationStore_DeleteDefaultOrg verifies that the store refuses to
// delete the default organization.
func TestOrganizationStore_DeleteDefaultOrg(t *testing.T) {
	type args struct {
		ctx context.Context
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "Delete the default organization",
			args: args{
				ctx: context.Background(),
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close()
		s := client.OrganizationsStore
		// The default organization is created by the store itself; fetch it
		// rather than hard-coding its ID.
		defaultOrg, err := s.DefaultOrganization(tt.args.ctx)
		if err != nil {
			t.Fatal(err)
		}
		if err := s.Delete(tt.args.ctx, defaultOrg); (err != nil) != tt.wantErr {
			t.Errorf("%q. OrganizationsStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}
	}
}
// TestOrganizationsStore_Add verifies that adding an organization whose name
// already exists fails with an error.
func TestOrganizationsStore_Add(t *testing.T) {
	type fields struct {
		orgs []chronograf.Organization
	}
	type args struct {
		ctx context.Context
		org *chronograf.Organization
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *chronograf.Organization
		wantErr bool
	}{
		{
			name: "Add organization - organization already exists",
			fields: fields{
				orgs: []chronograf.Organization{
					{
						Name: "The Good Place",
					},
				},
			},
			args: args{
				ctx: context.Background(),
				org: &chronograf.Organization{
					Name: "The Good Place",
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close()

		s := client.OrganizationsStore
		// Seed pre-existing organizations used for the collision check.
		for _, org := range tt.fields.orgs {
			_, err = s.Add(tt.args.ctx, &org)
			if err != nil {
				t.Fatal(err)
			}
		}

		_, err = s.Add(tt.args.ctx, tt.args.org)
		// Message fixed: this test exercises Add, not Update.
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. OrganizationsStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}

		// for the empty test
		if tt.want == nil {
			continue
		}

		got, err := s.Get(tt.args.ctx, chronograf.OrganizationQuery{Name: &tt.args.org.Name})
		if err != nil {
			t.Fatalf("failed to get organization: %v", err)
		}
		if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
			t.Errorf("%q. OrganizationsStore.Add():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestOrganizationsStore_DefaultOrganization verifies that the store always
// exposes the built-in default organization with its well-known ID, name,
// and role, regardless of what other organizations exist.
func TestOrganizationsStore_DefaultOrganization(t *testing.T) {
	type fields struct {
		orgs []chronograf.Organization
	}
	type args struct {
		ctx context.Context
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *chronograf.Organization
		wantErr bool
	}{
		{
			name: "Get Default Organization",
			fields: fields{
				orgs: []chronograf.Organization{
					{
						Name: "The Good Place",
					},
				},
			},
			args: args{
				ctx: context.Background(),
			},
			want: &chronograf.Organization{
				ID:          string(bolt.DefaultOrganizationID),
				Name:        bolt.DefaultOrganizationName,
				DefaultRole: bolt.DefaultOrganizationRole,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close()

		s := client.OrganizationsStore
		// Extra organizations must not displace the default one.
		for _, org := range tt.fields.orgs {
			_, err = s.Add(tt.args.ctx, &org)
			if err != nil {
				t.Fatal(err)
			}
		}

		got, err := s.DefaultOrganization(tt.args.ctx)
		// Message fixed: this test exercises DefaultOrganization, not Update.
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. OrganizationsStore.DefaultOrganization() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}

		if tt.want == nil {
			continue
		}
		if diff := cmp.Diff(got, tt.want, orgCmpOptions...); diff != "" {
			t.Errorf("%q. OrganizationsStore.DefaultOrganization():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}

View File

@ -1,183 +0,0 @@
package bolt
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure ServersStore implements chronograf.ServersStore.
var _ chronograf.ServersStore = &ServersStore{}
// ServersBucket is the bolt bucket to store lists of servers
var ServersBucket = []byte("Servers")
// ServersStore is the bolt implementation to store servers in a store.
// Used store servers that are associated in some way with a source
type ServersStore struct {
client *Client
}
// Migrate assigns the default organization to any stored server that has no
// organization set. It is intended to run once at startup against an
// existing boltdb.
func (s *ServersStore) Migrate(ctx context.Context) error {
	servers, err := s.All(ctx)
	if err != nil {
		return err
	}

	defaultOrg, err := s.client.OrganizationsStore.DefaultOrganization(ctx)
	if err != nil {
		return err
	}

	for _, server := range servers {
		if server.Organization == "" {
			server.Organization = defaultOrg.ID
			// Bug fix: this used to `return nil`, silently swallowing a
			// failed Update and reporting a successful migration.
			if err := s.Update(ctx, server); err != nil {
				return err
			}
		}
	}

	return nil
}
// All returns all known servers
// All returns every server stored in the ServersBucket.
func (s *ServersStore) All(ctx context.Context) ([]chronograf.Server, error) {
	var servers []chronograf.Server
	err := s.client.db.View(func(tx *bolt.Tx) error {
		found, err := s.all(ctx, tx)
		if err != nil {
			return err
		}
		servers = found
		return nil
	})
	if err != nil {
		return nil, err
	}
	return servers, nil
}
// Add creates a new Server in the ServerStore.
// Add creates a new Server in the ServerStore. The server's ID is assigned
// from the bucket sequence, and the new server is made the single active one.
func (s *ServersStore) Add(ctx context.Context, src chronograf.Server) (chronograf.Server, error) {
	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(ServersBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		src.ID = int(seq)

		// make the newly added server "active"; bug fix: the error from
		// resetActiveServer was previously dropped, which could leave two
		// active servers behind.
		if err := s.resetActiveServer(ctx, tx); err != nil {
			return err
		}
		src.Active = true

		if v, err := internal.MarshalServer(src); err != nil {
			return err
		} else if err := b.Put(itob(src.ID), v); err != nil {
			return err
		}

		return nil
	}); err != nil {
		return chronograf.Server{}, err
	}

	return src, nil
}
// Delete removes the Server from the ServersStore
// Delete removes the server with src.ID from the ServersStore. Deleting a
// missing key is a no-op for bolt, so no not-found error is produced here.
func (s *ServersStore) Delete(ctx context.Context, src chronograf.Server) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(ServersBucket).Delete(itob(src.ID))
	})
}
// Get returns a Server if the id exists.
// Get returns the Server stored under id, or chronograf.ErrServerNotFound
// when no such key exists.
func (s *ServersStore) Get(ctx context.Context, id int) (chronograf.Server, error) {
	var server chronograf.Server
	err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(ServersBucket).Get(itob(id))
		if v == nil {
			return chronograf.ErrServerNotFound
		}
		return internal.UnmarshalServer(v, &server)
	})
	if err != nil {
		return chronograf.Server{}, err
	}
	return server, nil
}
// Update a Server
// Update persists src over the existing server with the same ID. If src is
// marked active, every other server is deactivated first so that at most one
// server is active at a time.
func (s *ServersStore) Update(ctx context.Context, src chronograf.Server) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		// Require an existing server with the same ID.
		b := tx.Bucket(ServersBucket)
		if v := b.Get(itob(src.ID)); v == nil {
			return chronograf.ErrServerNotFound
		}

		// only one server can be active at a time; bug fix: the error from
		// resetActiveServer was previously ignored.
		if src.Active {
			if err := s.resetActiveServer(ctx, tx); err != nil {
				return err
			}
		}

		if v, err := internal.MarshalServer(src); err != nil {
			return err
		} else if err := b.Put(itob(src.ID), v); err != nil {
			return err
		}

		return nil
	})
}
// all reads every server from the bucket inside an existing transaction.
func (s *ServersStore) all(ctx context.Context, tx *bolt.Tx) ([]chronograf.Server, error) {
	var servers []chronograf.Server
	err := tx.Bucket(ServersBucket).ForEach(func(k, v []byte) error {
		var server chronograf.Server
		if err := internal.UnmarshalServer(v, &server); err != nil {
			return err
		}
		servers = append(servers, server)
		return nil
	})
	return servers, err
}
// resetActiveServer unsets the Active flag on all sources
// resetActiveServer clears the Active flag on every server in the bucket,
// persisting each change inside the supplied transaction.
func (s *ServersStore) resetActiveServer(ctx context.Context, tx *bolt.Tx) error {
	b := tx.Bucket(ServersBucket)
	servers, err := s.all(ctx, tx)
	if err != nil {
		return err
	}
	for _, server := range servers {
		if !server.Active {
			continue
		}
		server.Active = false
		v, err := internal.MarshalServer(server)
		if err != nil {
			return err
		}
		if err := b.Put(itob(server.ID), v); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -1,114 +0,0 @@
package bolt_test
import (
"context"
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Ensure an ServerStore can store, retrieve, update, and delete servers.
// TestServerStore exercises the full ServersStore lifecycle: Add, Get,
// Update (including the single-active-server invariant), Delete, and All.
func TestServerStore(t *testing.T) {
	c, err := NewTestClient()
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	s := c.ServersStore
	srcs := []chronograf.Server{
		chronograf.Server{
			Name:               "Of Truth",
			SrcID:              10,
			Username:           "marty",
			Password:           "I❤ jennifer parker",
			URL:                "toyota-hilux.lyon-estates.local",
			Active:             false,
			Organization:       "133",
			InsecureSkipVerify: true,
		},
		chronograf.Server{
			Name:               "HipToBeSquare",
			SrcID:              12,
			Username:           "calvinklein",
			Password:           "chuck b3rry",
			URL:                "toyota-hilux.lyon-estates.local",
			Active:             false,
			Organization:       "133",
			InsecureSkipVerify: false,
		},
	}

	// Add new srcs.
	ctx := context.Background()
	for i, src := range srcs {
		// Keep the ID assigned by Add so later Get/Update calls line up.
		if srcs[i], err = s.Add(ctx, src); err != nil {
			t.Fatal(err)
		}
		// Confirm first src in the store is the same as the original.
		if actual, err := s.Get(ctx, srcs[i].ID); err != nil {
			t.Fatal(err)
		} else if !reflect.DeepEqual(actual, srcs[i]) {
			t.Fatalf("server loaded is different then server saved; actual: %v, expected %v", actual, srcs[i])
		}
	}

	// Update server.
	srcs[0].Username = "calvinklein"
	srcs[1].Name = "Enchantment Under the Sea Dance"
	srcs[1].Organization = "1234"
	if err := s.Update(ctx, srcs[0]); err != nil {
		t.Fatal(err)
	} else if err := s.Update(ctx, srcs[1]); err != nil {
		t.Fatal(err)
	}

	// Confirm servers have updated.
	if src, err := s.Get(ctx, srcs[0].ID); err != nil {
		t.Fatal(err)
	} else if src.Username != "calvinklein" {
		t.Fatalf("server 0 update error: got %v, expected %v", src.Username, "calvinklein")
	}
	if src, err := s.Get(ctx, srcs[1].ID); err != nil {
		t.Fatal(err)
	} else if src.Name != "Enchantment Under the Sea Dance" {
		t.Fatalf("server 1 update error: got %v, expected %v", src.Name, "Enchantment Under the Sea Dance")
	} else if src.Organization != "1234" {
		t.Fatalf("server 1 update error: got %v, expected %v", src.Organization, "1234")
	}

	// Attempt to make two active sources; the second Update must deactivate
	// the first, so srcs[0] should come back inactive.
	srcs[0].Active = true
	srcs[1].Active = true
	if err := s.Update(ctx, srcs[0]); err != nil {
		t.Fatal(err)
	} else if err := s.Update(ctx, srcs[1]); err != nil {
		t.Fatal(err)
	}

	if actual, err := s.Get(ctx, srcs[0].ID); err != nil {
		t.Fatal(err)
	} else if actual.Active {
		t.Fatal("Able to set two active servers when only one should be permitted")
	}

	// Delete an server.
	if err := s.Delete(ctx, srcs[0]); err != nil {
		t.Fatal(err)
	}

	// Confirm server has been deleted.
	if _, err := s.Get(ctx, srcs[0].ID); err != chronograf.ErrServerNotFound {
		t.Fatalf("server delete error: got %v, expected %v", err, chronograf.ErrServerNotFound)
	}

	if bsrcs, err := s.All(ctx); err != nil {
		t.Fatal(err)
	} else if len(bsrcs) != 1 {
		t.Fatalf("After delete All returned incorrect number of srcs; got %d, expected %d", len(bsrcs), 1)
	} else if !reflect.DeepEqual(bsrcs[0], srcs[1]) {
		t.Fatalf("After delete All returned incorrect server; got %v, expected %v", bsrcs[0], srcs[1])
	}
}

View File

@ -1,288 +0,0 @@
package bolt
import (
"context"
"math"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
"github.com/influxdata/influxdb/v2/chronograf/roles"
bolt "go.etcd.io/bbolt"
)
// Ensure SourcesStore implements chronograf.SourcesStore.
var _ chronograf.SourcesStore = &SourcesStore{}
// SourcesBucket is the bolt bucket used to store source information
var SourcesBucket = []byte("Sources")
// DefaultSource is a temporary measure for single-binary.
var DefaultSource = &chronograf.Source{
ID: math.MaxInt32, // Use large number to avoid possible collisions in older chronograf.
Name: "autogen",
Type: "influx",
URL: "http://localhost:8086",
Default: false,
}
// SourcesStore is a bolt implementation to store time-series source information.
type SourcesStore struct {
client *Client
}
// Migrate adds the default source to an existing boltdb.
// Migrate adds the default source to an existing boltdb when no sources are
// stored, and backfills the default organization and viewer role on sources
// that predate those fields.
func (s *SourcesStore) Migrate(ctx context.Context) error {
	sources, err := s.All(ctx)
	if err != nil {
		return err
	}
	if len(sources) == 0 {
		if err := s.Put(ctx, DefaultSource); err != nil {
			return err
		}
	}

	defaultOrg, err := s.client.OrganizationsStore.DefaultOrganization(ctx)
	if err != nil {
		return err
	}

	for _, source := range sources {
		if source.Organization == "" {
			source.Organization = defaultOrg.ID
		}
		if source.Role == "" {
			source.Role = roles.ViewerRoleName
		}
		// Bug fix: this used to `return nil` on a failed Update, silently
		// aborting the migration while reporting success.
		if err := s.Update(ctx, source); err != nil {
			return err
		}
	}

	return nil
}
// All returns all known sources
// All returns every source stored in the SourcesBucket.
func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) {
	var sources []chronograf.Source
	err := s.client.db.View(func(tx *bolt.Tx) error {
		found, err := s.all(ctx, tx)
		if err != nil {
			return err
		}
		sources = found
		return nil
	})
	if err != nil {
		return nil, err
	}
	return sources, nil
}
// Add creates a new Source in the SourceStore.
// Add creates a new Source in the SourceStore. The very first source added
// to an empty store is forced to be the default source.
func (s *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) {
	// force first source added to be default
	existing, err := s.All(ctx)
	if err != nil {
		return chronograf.Source{}, err
	}
	if len(existing) == 0 {
		src.Default = true
	}

	if err := s.client.db.Update(func(tx *bolt.Tx) error {
		return s.add(ctx, &src, tx)
	}); err != nil {
		return chronograf.Source{}, err
	}

	return src, nil
}
// Delete removes the Source from the SourcesStore
// Delete removes src from the SourcesStore. If src was the default source,
// another source is promoted to default first (see setRandomDefault).
func (s *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		if err := s.setRandomDefault(ctx, src, tx); err != nil {
			return err
		}
		return s.delete(ctx, src, tx)
	})
}
// Get returns a Source if the id exists.
// Get returns the Source stored under id, or chronograf.ErrSourceNotFound
// when no such key exists.
func (s *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) {
	var source chronograf.Source
	err := s.client.db.View(func(tx *bolt.Tx) error {
		found, err := s.get(ctx, id, tx)
		if err != nil {
			return err
		}
		source = found
		return nil
	})
	if err != nil {
		return chronograf.Source{}, err
	}
	return source, nil
}
// Update a Source
// Update persists src over the existing source with the same ID inside a
// single read-write transaction.
func (s *SourcesStore) Update(ctx context.Context, src chronograf.Source) error {
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return s.update(ctx, src, tx)
	})
}
// all reads every source from the bucket inside an existing transaction.
func (s *SourcesStore) all(ctx context.Context, tx *bolt.Tx) ([]chronograf.Source, error) {
	var sources []chronograf.Source
	err := tx.Bucket(SourcesBucket).ForEach(func(k, v []byte) error {
		var source chronograf.Source
		if err := internal.UnmarshalSource(v, &source); err != nil {
			return err
		}
		sources = append(sources, source)
		return nil
	})
	return sources, err
}
// Put updates the source.
// Put writes src unconditionally, creating or overwriting the entry at
// src.ID (unlike Update, it does not require the source to already exist).
func (s *SourcesStore) Put(ctx context.Context, src *chronograf.Source) error {
	write := func(tx *bolt.Tx) error {
		return s.put(ctx, src, tx)
	}
	return s.client.db.Update(write)
}
// put marshals src and stores it at its ID within an existing transaction.
func (s *SourcesStore) put(ctx context.Context, src *chronograf.Source, tx *bolt.Tx) error {
	v, err := internal.MarshalSource(*src)
	if err != nil {
		return err
	}
	return tx.Bucket(SourcesBucket).Put(itob(src.ID), v)
}
// add assigns src a new sequence ID and stores it within an existing
// transaction, clearing other sources' Default flags when src is default.
func (s *SourcesStore) add(ctx context.Context, src *chronograf.Source, tx *bolt.Tx) error {
	b := tx.Bucket(SourcesBucket)
	seq, err := b.NextSequence()
	if err != nil {
		return err
	}
	src.ID = int(seq)

	if src.Default {
		if err := s.resetDefaultSource(ctx, tx); err != nil {
			return err
		}
	}

	v, err := internal.MarshalSource(*src)
	if err != nil {
		return err
	}
	return b.Put(itob(src.ID), v)
}
// delete removes src's entry from the bucket within an existing transaction.
func (s *SourcesStore) delete(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
	return tx.Bucket(SourcesBucket).Delete(itob(src.ID))
}
// get reads the source stored under id within an existing transaction,
// returning chronograf.ErrSourceNotFound when no such key exists.
func (s *SourcesStore) get(ctx context.Context, id int, tx *bolt.Tx) (chronograf.Source, error) {
	var source chronograf.Source
	v := tx.Bucket(SourcesBucket).Get(itob(id))
	if v == nil {
		return source, chronograf.ErrSourceNotFound
	}
	err := internal.UnmarshalSource(v, &source)
	return source, err
}
// update overwrites the existing source with src's ID within an existing
// transaction, clearing other sources' Default flags when src is default.
func (s *SourcesStore) update(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
	// Require an existing source with the same ID.
	b := tx.Bucket(SourcesBucket)
	if b.Get(itob(src.ID)) == nil {
		return chronograf.ErrSourceNotFound
	}

	if src.Default {
		if err := s.resetDefaultSource(ctx, tx); err != nil {
			return err
		}
	}

	v, err := internal.MarshalSource(src)
	if err != nil {
		return err
	}
	return b.Put(itob(src.ID), v)
}
// resetDefaultSource unsets the Default flag on all sources
// resetDefaultSource clears the Default flag on every source in the bucket,
// persisting each change inside the supplied transaction.
func (s *SourcesStore) resetDefaultSource(ctx context.Context, tx *bolt.Tx) error {
	b := tx.Bucket(SourcesBucket)
	sources, err := s.all(ctx, tx)
	if err != nil {
		return err
	}
	for _, source := range sources {
		if !source.Default {
			continue
		}
		source.Default = false
		v, err := internal.MarshalSource(source)
		if err != nil {
			return err
		}
		if err := b.Put(itob(source.ID), v); err != nil {
			return err
		}
	}
	return nil
}
// setRandomDefault will locate a source other than the provided
// chronograf.Source and set it as the default source. If no other sources are
// available, the provided source will be set to the default source if is not
// already. It assumes that the provided chronograf.Source has been persisted.
// setRandomDefault will locate a source other than the provided
// chronograf.Source and set it as the default source. If no other sources are
// available, the provided source will be set to the default source if is not
// already. It assumes that the provided chronograf.Source has been persisted.
func (s *SourcesStore) setRandomDefault(ctx context.Context, src chronograf.Source, tx *bolt.Tx) error {
	// Check if requested source is the current default
	if target, err := s.get(ctx, src.ID, tx); err != nil {
		return err
	} else if target.Default {
		// Locate another source to be the new default
		srcs, err := s.all(ctx, tx)
		if err != nil {
			return err
		}
		var other *chronograf.Source
		// If target is the only source, `other` ends the loop still pointing
		// at target itself, which implements the "set the provided source as
		// default" fallback documented above.
		for idx := range srcs {
			other = &srcs[idx]
			// avoid selecting the source we're about to delete as the new default
			if other.ID != target.ID {
				break
			}
		}

		// set the other to be the default
		other.Default = true
		if err := s.update(ctx, *other, tx); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -1,200 +0,0 @@
package bolt_test
import (
"context"
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt"
)
// Ensure an SourceStore can store, retrieve, update, and delete sources.
// TestSourceStore exercises the full SourcesStore lifecycle: Add, Get,
// Update (including the single-default-source invariant), Delete with
// default promotion, and the first-source-becomes-default rule.
func TestSourceStore(t *testing.T) {
	c, err := NewTestClient()
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	s := c.SourcesStore
	srcs := []chronograf.Source{
		chronograf.Source{
			Name:         "Of Truth",
			Type:         "influx",
			Username:     "marty",
			Password:     "I❤ jennifer parker",
			URL:          "toyota-hilux.lyon-estates.local",
			Default:      true,
			Organization: "1337",
			DefaultRP:    "pineapple",
		},
		chronograf.Source{
			Name:         "HipToBeSquare",
			Type:         "influx",
			Username:     "calvinklein",
			Password:     "chuck b3rry",
			URL:          "toyota-hilux.lyon-estates.local",
			Default:      true,
			Organization: "1337",
		},
		chronograf.Source{
			Name:               "HipToBeSquare",
			Type:               "influx",
			Username:           "calvinklein",
			Password:           "chuck b3rry",
			URL:                "https://toyota-hilux.lyon-estates.local",
			InsecureSkipVerify: true,
			Default:            false,
			Organization:       "1337",
		},
	}

	ctx := context.Background()
	// Add new srcs.
	for i, src := range srcs {
		// Keep the ID assigned by Add so later Get/Update calls line up.
		if srcs[i], err = s.Add(ctx, src); err != nil {
			t.Fatal(err)
		}
		// Confirm first src in the store is the same as the original.
		if actual, err := s.Get(ctx, srcs[i].ID); err != nil {
			t.Fatal(err)
		} else if !reflect.DeepEqual(actual, srcs[i]) {
			t.Fatalf("source loaded is different then source saved; actual: %v, expected %v", actual, srcs[i])
		}
	}

	// Update source.
	srcs[0].Username = "calvinklein"
	srcs[1].Name = "Enchantment Under the Sea Dance"
	srcs[2].DefaultRP = "cubeapple"
	mustUpdateSource(t, s, srcs[0])
	mustUpdateSource(t, s, srcs[1])
	mustUpdateSource(t, s, srcs[2])

	// Confirm sources have updated.
	if src, err := s.Get(ctx, srcs[0].ID); err != nil {
		t.Fatal(err)
	} else if src.Username != "calvinklein" {
		t.Fatalf("source 0 update error: got %v, expected %v", src.Username, "calvinklein")
	}
	if src, err := s.Get(ctx, srcs[1].ID); err != nil {
		t.Fatal(err)
	} else if src.Name != "Enchantment Under the Sea Dance" {
		t.Fatalf("source 1 update error: got %v, expected %v", src.Name, "Enchantment Under the Sea Dance")
	}
	if src, err := s.Get(ctx, srcs[2].ID); err != nil {
		t.Fatal(err)
	} else if src.DefaultRP != "cubeapple" {
		t.Fatalf("source 2 update error: got %v, expected %v", src.DefaultRP, "cubeapple")
	}

	// Attempt to make two default sources; the second update must demote the
	// first, so srcs[0] should come back non-default.
	srcs[0].Default = true
	srcs[1].Default = true
	mustUpdateSource(t, s, srcs[0])
	mustUpdateSource(t, s, srcs[1])

	if actual, err := s.Get(ctx, srcs[0].ID); err != nil {
		t.Fatal(err)
	} else if actual.Default {
		t.Fatal("Able to set two default sources when only one should be permitted")
	}

	// Attempt to add a new default source; exactly one default must remain.
	srcs = append(srcs, chronograf.Source{
		Name:         "Biff Tannen",
		Type:         "influx",
		Username:     "HELLO",
		Password:     "MCFLY",
		URL:          "anybody.in.there.local",
		Default:      true,
		Organization: "1892",
	})
	srcs[3] = mustAddSource(t, s, srcs[3])
	if srcs, err := s.All(ctx); err != nil {
		t.Fatal(err)
	} else {
		defaults := 0
		for _, src := range srcs {
			if src.Default {
				defaults++
			}
		}

		if defaults != 1 {
			t.Fatal("Able to add more than one default source")
		}
	}

	// Delete an source.
	if err := s.Delete(ctx, srcs[0]); err != nil {
		t.Fatal(err)
	}

	// Confirm source has been deleted.
	if _, err := s.Get(ctx, srcs[0].ID); err != chronograf.ErrSourceNotFound {
		t.Fatalf("source delete error: got %v, expected %v", err, chronograf.ErrSourceNotFound)
	}

	// Delete the other source we created
	if err := s.Delete(ctx, srcs[3]); err != nil {
		t.Fatal(err)
	}

	// Three remain: srcs[1], srcs[2], and the store's built-in DefaultSource.
	if bsrcs, err := s.All(ctx); err != nil {
		t.Fatal(err)
	} else if len(bsrcs) != 3 {
		t.Fatalf("After delete All returned incorrect number of srcs; got %d, expected %d", len(bsrcs), 3)
	} else if !reflect.DeepEqual(bsrcs[0], srcs[1]) {
		t.Fatalf("After delete All returned incorrect source; got %v, expected %v", bsrcs[0], srcs[1])
	}

	// Delete the final sources
	if err := s.Delete(ctx, srcs[1]); err != nil {
		t.Fatal(err)
	}
	if err := s.Delete(ctx, srcs[2]); err != nil {
		t.Fatal(err)
	}
	if err := s.Delete(ctx, *bolt.DefaultSource); err != nil {
		t.Fatal(err)
	}

	// Try to add one source as a non-default and ensure that it becomes a
	// default
	src := mustAddSource(t, s, chronograf.Source{
		Name:         "Biff Tannen",
		Type:         "influx",
		Username:     "HELLO",
		Password:     "MCFLY",
		URL:          "anybody.in.there.local",
		Default:      false,
		Organization: "1234",
	})

	if actual, err := s.Get(ctx, src.ID); err != nil {
		t.Fatal(err)
	} else if !actual.Default {
		t.Fatal("Expected first source added to be default but wasn't")
	}
}
// mustUpdateSource updates src in the store and fails the test on error.
func mustUpdateSource(t *testing.T, s *bolt.SourcesStore, src chronograf.Source) {
	if err := s.Update(context.Background(), src); err != nil {
		t.Fatal(err)
	}
}
// mustAddSource adds src to the store, failing the test on error, and
// returns the stored source (with its assigned ID).
func mustAddSource(t *testing.T, s *bolt.SourcesStore, src chronograf.Source) chronograf.Source {
	// Fix: the original had a dead `return src` after t.Fatal inside an
	// if/else that shadowed src; untangle it and mark this as a helper so
	// failures are attributed to the caller's line.
	t.Helper()
	added, err := s.Add(context.Background(), src)
	if err != nil {
		t.Fatal(err)
	}
	return added
}

View File

@ -1,196 +0,0 @@
package bolt
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/bolt/internal"
bolt "go.etcd.io/bbolt"
)
// Ensure UsersStore implements chronograf.UsersStore.
var _ chronograf.UsersStore = &UsersStore{}
// UsersBucket is used to store users local to chronograf
var UsersBucket = []byte("UsersV2")
// UsersStore uses bolt to store and retrieve users
type UsersStore struct {
client *Client
}
// get searches the UsersStore for user with id and returns the bolt representation
// get returns the user stored under id, or chronograf.ErrUserNotFound when
// no such key exists.
func (s *UsersStore) get(ctx context.Context, id uint64) (*chronograf.User, error) {
	var user chronograf.User
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(UsersBucket).Get(u64tob(id))
		if v == nil {
			return chronograf.ErrUserNotFound
		}
		return internal.UnmarshalUser(v, &user)
	}); err != nil {
		return nil, err
	}
	return &user, nil
}
// each invokes fn once for every user in the bucket, inside a single read
// transaction. Each callback receives a pointer to a fresh copy.
func (s *UsersStore) each(ctx context.Context, fn func(*chronograf.User)) error {
	return s.client.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error {
			var u chronograf.User
			if err := internal.UnmarshalUser(v, &u); err != nil {
				return err
			}
			fn(&u)
			return nil
		})
	})
}
// Num returns the number of users in the UsersStore
// Num returns the number of users in the UsersStore.
func (s *UsersStore) Num(ctx context.Context) (int, error) {
	n := 0
	if err := s.client.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(UsersBucket).ForEach(func(k, v []byte) error {
			n++
			return nil
		})
	}); err != nil {
		return 0, err
	}
	return n, nil
}
// Get searches the UsersStore for user with name
// Get looks a user up either by ID, or by the (Name, Provider, Scheme)
// triple; any other query shape is an error. Returns
// chronograf.ErrUserNotFound when no user matches.
func (s *UsersStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) {
	if q.ID != nil {
		return s.get(ctx, *q.ID)
	}

	if q.Name == nil || q.Provider == nil || q.Scheme == nil {
		return nil, fmt.Errorf("must specify either ID, or Name, Provider, and Scheme in UserQuery")
	}

	// Linear scan; keep the first user that matches all three fields.
	var match *chronograf.User
	err := s.each(ctx, func(u *chronograf.User) {
		if match != nil {
			return
		}
		if u.Name == *q.Name && u.Provider == *q.Provider && u.Scheme == *q.Scheme {
			match = u
		}
	})
	if err != nil {
		return nil, err
	}
	if match == nil {
		return nil, chronograf.ErrUserNotFound
	}
	return match, nil
}
// userExists reports whether a user with u's (Name, Provider, Scheme)
// already exists. Only ErrUserNotFound is treated as "does not exist";
// other lookup errors are propagated.
func (s *UsersStore) userExists(ctx context.Context, u *chronograf.User) (bool, error) {
	q := chronograf.UserQuery{
		Name:     &u.Name,
		Provider: &u.Provider,
		Scheme:   &u.Scheme,
	}
	switch _, err := s.Get(ctx, q); err {
	case nil:
		return true, nil
	case chronograf.ErrUserNotFound:
		return false, nil
	default:
		return false, err
	}
}
// Add a new User to the UsersStore.
// Add stores a new user, assigning it a sequence-generated ID. Adding a
// user whose (Name, Provider, Scheme) already exists fails with
// chronograf.ErrUserAlreadyExists.
func (s *UsersStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	if u == nil {
		return nil, fmt.Errorf("user provided is nil")
	}

	exists, err := s.userExists(ctx, u)
	if err != nil {
		return nil, err
	}
	if exists {
		return nil, chronograf.ErrUserAlreadyExists
	}

	err = s.client.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(UsersBucket)
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		u.ID = seq
		v, err := internal.MarshalUser(u)
		if err != nil {
			return err
		}
		return b.Put(u64tob(seq), v)
	})
	if err != nil {
		return nil, err
	}
	return u, nil
}
// Delete a user from the UsersStore
// Delete removes the user with u.ID, failing with ErrUserNotFound when the
// user does not exist.
func (s *UsersStore) Delete(ctx context.Context, u *chronograf.User) error {
	if _, err := s.get(ctx, u.ID); err != nil {
		return err
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(UsersBucket).Delete(u64tob(u.ID))
	})
}
// Update a user
// Update overwrites the stored user with u.ID, failing with ErrUserNotFound
// when the user does not exist.
func (s *UsersStore) Update(ctx context.Context, u *chronograf.User) error {
	if _, err := s.get(ctx, u.ID); err != nil {
		return err
	}
	return s.client.db.Update(func(tx *bolt.Tx) error {
		v, err := internal.MarshalUser(u)
		if err != nil {
			return err
		}
		return tx.Bucket(UsersBucket).Put(u64tob(u.ID), v)
	})
}
// All returns all users
// All returns all users.
//
// Consistency: this used to duplicate the bucket-iteration/unmarshal loop
// already implemented by each; it now delegates to it.
func (s *UsersStore) All(ctx context.Context) ([]chronograf.User, error) {
	var users []chronograf.User
	if err := s.each(ctx, func(u *chronograf.User) {
		users = append(users, *u)
	}); err != nil {
		return nil, err
	}
	return users, nil
}

View File

@ -1,564 +0,0 @@
package bolt_test
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/influxdb/v2/chronograf"
)
// IgnoreFields is used because ID is created by BoltDB and cannot be predicted reliably
// EquateEmpty is used because we want nil slices, arrays, and maps to be equal to the empty map
var cmpOptions = cmp.Options{
cmpopts.IgnoreFields(chronograf.User{}, "ID"),
cmpopts.EquateEmpty(),
}
// TestUsersStore_GetWithID verifies UsersStore.Get when querying by ID:
// an unknown ID fails, and a just-added user round-trips.
func TestUsersStore_GetWithID(t *testing.T) {
	type args struct {
		ctx context.Context
		usr *chronograf.User
	}
	tests := []struct {
		name     string
		args     args
		want     *chronograf.User
		wantErr  bool
		addFirst bool // when true, persist args.usr (assigning its ID) before the Get
	}{
		{
			name: "User not found",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					ID: 1337,
				},
			},
			wantErr: true,
		},
		{
			name: "Get user",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					Name:     "billietta",
					Provider: "google",
					Scheme:   "oauth2",
				},
			},
			// ID is store-assigned and ignored by cmpOptions.
			want: &chronograf.User{
				Name:     "billietta",
				Provider: "google",
				Scheme:   "oauth2",
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): defer inside the loop — clients stay open until the
		// function returns.
		defer client.Close()

		s := client.UsersStore
		if tt.addFirst {
			tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr)
			if err != nil {
				t.Fatal(err)
			}
		}
		got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ID: &tt.args.usr.ID})
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" {
			t.Errorf("%q. UsersStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestUsersStore_GetWithNameProviderScheme exercises UsersStore.Get when
// querying by the (name, provider, scheme) triple instead of by ID.
func TestUsersStore_GetWithNameProviderScheme(t *testing.T) {
	type args struct {
		ctx context.Context
		usr *chronograf.User
	}
	tests := []struct {
		name     string
		args     args
		want     *chronograf.User
		wantErr  bool
		addFirst bool
	}{
		{
			name: "User not found",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					Name:     "billietta",
					Provider: "google",
					Scheme:   "oauth2",
				},
			},
			wantErr: true,
		},
		{
			name: "Get user",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					Name:     "billietta",
					Provider: "google",
					Scheme:   "oauth2",
				},
			},
			want: &chronograf.User{
				Name:     "billietta",
				Provider: "google",
				Scheme:   "oauth2",
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		if tt.addFirst {
			tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr)
			if err != nil {
				t.Fatal(err)
			}
		}
		got, err := s.Get(tt.args.ctx, chronograf.UserQuery{
			Name:     &tt.args.usr.Name,
			Provider: &tt.args.usr.Provider,
			Scheme:   &tt.args.usr.Scheme,
		})
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" {
			t.Errorf("%q. UsersStore.Get():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestUsersStore_GetInvalid verifies that Get rejects an empty query
// (neither an ID nor a name/provider/scheme triple supplied).
func TestUsersStore_GetInvalid(t *testing.T) {
	client, err := NewTestClient()
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	if _, err := client.UsersStore.Get(context.Background(), chronograf.UserQuery{}); err == nil {
		t.Errorf("Invalid Get. UsersStore.Get() error = %v", err)
	}
}
// TestUsersStore_Add covers adding a brand-new user and the duplicate-add
// error path (adding the same name/provider/scheme twice must fail).
func TestUsersStore_Add(t *testing.T) {
	type args struct {
		ctx      context.Context
		u        *chronograf.User
		addFirst bool
	}
	tests := []struct {
		name    string
		args    args
		want    *chronograf.User
		wantErr bool
	}{
		{
			name: "Add new user",
			args: args{
				ctx: context.Background(),
				u: &chronograf.User{
					Name:     "docbrown",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "editor",
						},
					},
				},
			},
			want: &chronograf.User{
				Name:     "docbrown",
				Provider: "github",
				Scheme:   "oauth2",
				Roles: []chronograf.Role{
					{
						Name: "editor",
					},
				},
			},
		},
		{
			name: "User already exists",
			args: args{
				ctx:      context.Background(),
				addFirst: true,
				u: &chronograf.User{
					Name:     "docbrown",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "editor",
						},
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		if tt.args.addFirst {
			// Seed the duplicate; success of this call is not the point of the case.
			_, _ = s.Add(tt.args.ctx, tt.args.u)
		}
		got, err := s.Add(tt.args.ctx, tt.args.u)
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		if tt.wantErr {
			continue
		}
		// Re-read through Get to confirm the user was actually persisted.
		got, err = s.Get(tt.args.ctx, chronograf.UserQuery{ID: &got.ID})
		if err != nil {
			t.Fatalf("failed to get user: %v", err)
		}
		if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" {
			t.Errorf("%q. UsersStore.Add():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestUsersStore_Delete covers deleting a nonexistent user (must error)
// and deleting a user that was just added (must succeed).
func TestUsersStore_Delete(t *testing.T) {
	type args struct {
		ctx  context.Context
		user *chronograf.User
	}
	tests := []struct {
		name     string
		args     args
		addFirst bool
		wantErr  bool
	}{
		{
			name: "No such user",
			args: args{
				ctx: context.Background(),
				user: &chronograf.User{
					ID: 10,
				},
			},
			wantErr: true,
		},
		{
			name: "Delete new user",
			args: args{
				ctx: context.Background(),
				user: &chronograf.User{
					Name: "noone",
				},
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		if tt.addFirst {
			// Replace the fixture with the stored copy so Delete sees a real ID.
			tt.args.user, _ = s.Add(tt.args.ctx, tt.args.user)
		}
		if err := s.Delete(tt.args.ctx, tt.args.user); (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}
	}
}
// TestUsersStore_Update covers updating a nonexistent user (must error),
// replacing a user's roles, and rewriting name/provider/scheme in place.
func TestUsersStore_Update(t *testing.T) {
	type args struct {
		ctx      context.Context
		usr      *chronograf.User
		roles    []chronograf.Role
		provider string
		scheme   string
		name     string
	}
	tests := []struct {
		name     string
		args     args
		addFirst bool
		want     *chronograf.User
		wantErr  bool
	}{
		{
			name: "No such user",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					ID: 10,
				},
			},
			wantErr: true,
		},
		{
			name: "Update user role",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					Name:     "bobetta",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "viewer",
						},
					},
				},
				roles: []chronograf.Role{
					{
						Name: "editor",
					},
				},
			},
			want: &chronograf.User{
				Name:     "bobetta",
				Provider: "github",
				Scheme:   "oauth2",
				Roles: []chronograf.Role{
					{
						Name: "editor",
					},
				},
			},
			addFirst: true,
		},
		{
			name: "Update user provider and scheme",
			args: args{
				ctx: context.Background(),
				usr: &chronograf.User{
					Name:     "bobetta",
					Provider: "github",
					Scheme:   "oauth2",
				},
				provider: "google",
				scheme:   "oauth2",
				name:     "billietta",
			},
			want: &chronograf.User{
				Name:     "billietta",
				Provider: "google",
				Scheme:   "oauth2",
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		if tt.addFirst {
			tt.args.usr, err = s.Add(tt.args.ctx, tt.args.usr)
			if err != nil {
				t.Fatal(err)
			}
		}
		// Mutate only the fields this case sets before calling Update.
		if tt.args.roles != nil {
			tt.args.usr.Roles = tt.args.roles
		}
		if tt.args.provider != "" {
			tt.args.usr.Provider = tt.args.provider
		}
		if tt.args.scheme != "" {
			tt.args.usr.Scheme = tt.args.scheme
		}
		if tt.args.name != "" {
			tt.args.usr.Name = tt.args.name
		}
		if err := s.Update(tt.args.ctx, tt.args.usr); (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}
		// for the empty test
		if tt.want == nil {
			continue
		}
		got, err := s.Get(tt.args.ctx, chronograf.UserQuery{ID: &tt.args.usr.ID})
		if err != nil {
			t.Fatalf("failed to get user: %v", err)
		}
		if diff := cmp.Diff(got, tt.want, cmpOptions...); diff != "" {
			t.Errorf("%q. UsersStore.Update():\n-got/+want\ndiff %s", tt.name, diff)
		}
	}
}
// TestUsersStore_All verifies All returns every stored user (and an empty
// result when the store is empty).
//
// Fixes over the previous version: Add errors are no longer silently
// ignored, and gots/want lengths are compared before indexing — the old
// `tt.want[i]` lookup panicked if All returned more users than expected.
func TestUsersStore_All(t *testing.T) {
	tests := []struct {
		name     string
		ctx      context.Context
		want     []chronograf.User
		addFirst bool
		wantErr  bool
	}{
		{
			name: "No users",
		},
		{
			name: "Update new user",
			want: []chronograf.User{
				{
					Name:     "howdy",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "viewer",
						},
					},
				},
				{
					Name:     "doody",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "editor",
						},
					},
				},
			},
			addFirst: true,
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		if tt.addFirst {
			for _, u := range tt.want {
				// Add mutates its argument's ID, so pass the loop copy.
				if _, err := s.Add(tt.ctx, &u); err != nil {
					t.Fatal(err)
				}
			}
		}
		gots, err := s.All(tt.ctx)
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.All() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		// Guard the indexed comparison below against a length mismatch.
		if len(gots) != len(tt.want) {
			t.Errorf("%q. UsersStore.All() returned %d users, want %d", tt.name, len(gots), len(tt.want))
			continue
		}
		for i, got := range gots {
			if diff := cmp.Diff(got, tt.want[i], cmpOptions...); diff != "" {
				t.Errorf("%q. UsersStore.All():\n-got/+want\ndiff %s", tt.name, diff)
			}
		}
	}
}
// TestUsersStore_Num verifies that Num counts the users currently stored.
func TestUsersStore_Num(t *testing.T) {
	tests := []struct {
		name    string
		ctx     context.Context
		users   []chronograf.User
		want    int
		wantErr bool
	}{
		{
			name: "No users",
			want: 0,
		},
		{
			name: "Update new user",
			want: 2,
			users: []chronograf.User{
				{
					Name:     "howdy",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "viewer",
						},
					},
				},
				{
					Name:     "doody",
					Provider: "github",
					Scheme:   "oauth2",
					Roles: []chronograf.Role{
						{
							Name: "editor",
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		client, err := NewTestClient()
		if err != nil {
			t.Fatal(err)
		}
		defer client.Close() // runs at function exit, not per iteration
		s := client.UsersStore
		for _, u := range tt.users {
			// Seed the store; Add errors are intentionally ignored here and
			// surface indirectly through the count assertion below.
			s.Add(tt.ctx, &u)
		}
		got, err := s.Num(tt.ctx)
		if (err != nil) != tt.wantErr {
			t.Errorf("%q. UsersStore.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr)
			continue
		}
		if got != tt.want {
			t.Errorf("%q. UsersStore.Num() = %d. want %d", tt.name, got, tt.want)
		}
	}
}

View File

@ -1,19 +0,0 @@
package bolt
import (
"encoding/binary"
)
// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, uint64(v))
	return out
}
// u64tob returns an 8-byte big endian representation of v.
func u64tob(v uint64) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, v)
	return out
}

View File

@ -1,225 +0,0 @@
package enterprise
import (
"container/ring"
"net/url"
"strings"
"context"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// Compile-time assertion that Client satisfies chronograf.TimeSeries.
var _ chronograf.TimeSeries = &Client{}

// Ctrl represents administrative controls over an Influx Enterprise cluster:
// cluster topology plus user, role, and permission management.
type Ctrl interface {
	ShowCluster(ctx context.Context) (*Cluster, error)

	// Users returns all users; a non-nil name filters to a single user.
	Users(ctx context.Context, name *string) (*Users, error)
	User(ctx context.Context, name string) (*User, error)
	CreateUser(ctx context.Context, name, passwd string) error
	DeleteUser(ctx context.Context, name string) error
	ChangePassword(ctx context.Context, name, passwd string) error
	SetUserPerms(ctx context.Context, name string, perms Permissions) error

	// UserRoles maps each username to all roles that include it.
	UserRoles(ctx context.Context) (map[string]Roles, error)

	// Roles returns all roles; a non-nil name filters to a single role.
	Roles(ctx context.Context, name *string) (*Roles, error)
	Role(ctx context.Context, name string) (*Role, error)
	CreateRole(ctx context.Context, name string) error
	DeleteRole(ctx context.Context, name string) error
	SetRolePerms(ctx context.Context, name string, perms Permissions) error
	SetRoleUsers(ctx context.Context, name string, users []string) error
	AddRoleUsers(ctx context.Context, name string, users []string) error
	RemoveRoleUsers(ctx context.Context, name string, users []string) error
}
// Client is a device for retrieving time series data from an Influx Enterprise
// cluster. It is configured using the addresses of one or more meta node URLs.
// Data node URLs are retrieved automatically from the meta nodes and queries
// are appropriately load balanced across the cluster.
type Client struct {
	Ctrl
	UsersStore chronograf.UsersStore
	RolesStore chronograf.RolesStore
	Logger     chronograf.Logger

	// dataNodes is a round-robin ring of per-data-node TimeSeries clients,
	// populated by Connect (or by NewClientWithTimeSeries).
	dataNodes *ring.Ring
	// opened is set by Connect; Query/Write refuse to run before it.
	opened bool
}
// NewClientWithTimeSeries initializes a Client with a known set of TimeSeries.
// The supplied series are placed into the round-robin ring directly, so
// Connect will not query the meta node for data nodes.
//
// Fix: the lg parameter is now also assigned to the Client's Logger field,
// matching NewClientWithURL; previously it was only threaded into the
// users/roles stores and Client.Logger stayed nil.
func NewClientWithTimeSeries(lg chronograf.Logger, mu string, authorizer influx.Authorizer, tls, insecure bool, series ...chronograf.TimeSeries) (*Client, error) {
	metaURL, err := parseMetaURL(mu, tls)
	if err != nil {
		return nil, err
	}
	ctrl := NewMetaClient(metaURL, insecure, authorizer)
	c := &Client{
		Ctrl: ctrl,
		UsersStore: &UserStore{
			Ctrl:   ctrl,
			Logger: lg,
		},
		RolesStore: &RolesStore{
			Ctrl:   ctrl,
			Logger: lg,
		},
		Logger: lg,
	}
	// Seed the ring so Connect's early-return path keeps these series.
	c.dataNodes = ring.New(len(series))
	for _, s := range series {
		c.dataNodes.Value = s
		c.dataNodes = c.dataNodes.Next()
	}
	return c, nil
}
// NewClientWithURL initializes an Enterprise client with a URL to a Meta Node.
// Acceptable URLs include host:port combinations as well as scheme://host:port
// varieties. TLS is used when the URL contains "https" or when the TLS
// parameter is set. authorizer will add the correct `Authorization` headers
// on the out-bound request.
func NewClientWithURL(mu string, authorizer influx.Authorizer, tls bool, insecure bool, lg chronograf.Logger) (*Client, error) {
	metaURL, err := parseMetaURL(mu, tls)
	if err != nil {
		return nil, err
	}
	ctrl := NewMetaClient(metaURL, insecure, authorizer)
	client := &Client{
		Ctrl:       ctrl,
		UsersStore: &UserStore{Ctrl: ctrl, Logger: lg},
		RolesStore: &RolesStore{Ctrl: ctrl, Logger: lg},
		Logger:     lg,
	}
	return client, nil
}
// Connect prepares a Client to process queries. It must be called prior to calling Query
func (c *Client) Connect(ctx context.Context, src *chronograf.Source) error {
	c.opened = true
	// return early if we already have dataNodes
	if c.dataNodes != nil {
		return nil
	}
	// Discover the cluster's data nodes from the meta node.
	cluster, err := c.Ctrl.ShowCluster(ctx)
	if err != nil {
		return err
	}
	c.dataNodes = ring.New(len(cluster.DataNodes))
	for _, dn := range cluster.DataNodes {
		cl := &influx.Client{
			Logger: c.Logger,
		}
		// Clone the source so each data node gets its own URL.
		dataSrc := &chronograf.Source{}
		*dataSrc = *src
		dataSrc.URL = dn.HTTPAddr
		if err := cl.Connect(ctx, dataSrc); err != nil {
			// Best-effort: a node that fails to connect is skipped.
			// NOTE(review): skipping leaves that ring slot's Value nil; if
			// every node fails, nextDataNode's type assertion will panic —
			// confirm callers tolerate this.
			continue
		}
		c.dataNodes.Value = cl
		c.dataNodes = c.dataNodes.Next()
	}
	return nil
}
// Query retrieves timeseries information pertaining to a specified query. It
// can be cancelled by using a provided context.
// Returns chronograf.ErrUninitialized if Connect has not been called.
func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {
	if !c.opened {
		return nil, chronograf.ErrUninitialized
	}
	// Round-robin the query across the connected data nodes.
	return c.nextDataNode().Query(ctx, q)
}
// Write records points into a time series.
// Returns chronograf.ErrUninitialized if Connect has not been called.
func (c *Client) Write(ctx context.Context, points []chronograf.Point) error {
	if !c.opened {
		return chronograf.ErrUninitialized
	}
	// Round-robin the write across the connected data nodes.
	return c.nextDataNode().Write(ctx, points)
}
// Users is the interface to the users within Influx Enterprise.
func (c *Client) Users(context.Context) chronograf.UsersStore {
	return c.UsersStore
}
// Roles provide a grouping of permissions given to a grouping of users.
func (c *Client) Roles(ctx context.Context) (chronograf.RolesStore, error) {
	return c.RolesStore, nil
}
// Permissions returns all Influx Enterprise permission strings, granted at
// both the all-databases scope and the per-database scope.
func (c *Client) Permissions(context.Context) chronograf.Permissions {
	allowed := chronograf.Allowances{
		"NoPermissions",
		"ViewAdmin",
		"ViewChronograf",
		"CreateDatabase",
		"CreateUserAndRole",
		"AddRemoveNode",
		"DropDatabase",
		"DropData",
		"ReadData",
		"WriteData",
		"Rebalance",
		"ManageShard",
		"ManageContinuousQuery",
		"ManageQuery",
		"ManageSubscription",
		"Monitor",
		"CopyShard",
		"KapacitorAPI",
		"KapacitorConfigAPI",
	}
	return chronograf.Permissions{
		{Scope: chronograf.AllScope, Allowed: allowed},
		{Scope: chronograf.DBScope, Allowed: allowed},
	}
}
// nextDataNode retrieves the next available data node by advancing the
// round-robin ring one slot and returning its value.
// NOTE(review): panics if the ring slot holds nil (see Connect) — confirm
// Connect always populates at least one node before this is reached.
func (c *Client) nextDataNode() chronograf.TimeSeries {
	c.dataNodes = c.dataNodes.Next()
	return c.dataNodes.Value.(chronograf.TimeSeries)
}
// parseMetaURL constructs a url from either a host:port combination or a
// scheme://host:port combo. The optional TLS parameter takes precedence over
// any TLS preference found in the provided URL.
//
// Fix: return immediately when url.Parse fails. Previously a parse error
// fell through to `metaURL.Scheme = "https"` with a nil metaURL, panicking
// whenever an invalid URL was combined with tls=true.
func parseMetaURL(mu string, tls bool) (metaURL *url.URL, err error) {
	if strings.Contains(mu, "http") {
		metaURL, err = url.Parse(mu)
		if err != nil {
			return nil, err
		}
	} else {
		metaURL = &url.URL{
			Scheme: "http",
			Host:   mu,
		}
	}
	if tls {
		metaURL.Scheme = "https"
	}
	return metaURL, nil
}

View File

@ -1,269 +0,0 @@
package enterprise_test
import (
"context"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/enterprise"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// Test_Enterprise_FetchesDataNodes verifies that Connect asks the meta node
// for the cluster topology (ShowCluster) when no data nodes are preloaded.
func Test_Enterprise_FetchesDataNodes(t *testing.T) {
	t.Parallel()
	showClustersCalled := false
	ctrl := &mockCtrl{
		showCluster: func(ctx context.Context) (*enterprise.Cluster, error) {
			showClustersCalled = true
			return &enterprise.Cluster{}, nil
		},
	}
	cl := &enterprise.Client{
		Ctrl: ctrl,
	}
	bg := context.Background()
	err := cl.Connect(bg, &chronograf.Source{})
	if err != nil {
		t.Fatal("Unexpected error while creating enterprise client. err:", err)
	}
	if !showClustersCalled {
		t.Fatal("Expected request to meta node but none was issued")
	}
}
// Test_Enterprise_IssuesQueries verifies that Query reaches a data node's
// /query endpoint.
//
// Fix: the request-path check inside the handler now uses t.Errorf+return
// instead of t.Fatal — the handler runs on the HTTP server's goroutine, and
// the testing package forbids calling FailNow/Fatal from any goroutine other
// than the one running the test.
func Test_Enterprise_IssuesQueries(t *testing.T) {
	t.Parallel()
	called := false
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		called = true
		if r.URL.Path != "/query" {
			t.Errorf("Expected request to '/query' but was %s", r.URL.Path)
			return
		}
		rw.Write([]byte(`{}`))
	}))
	defer ts.Close()

	cl := &enterprise.Client{
		Ctrl:   NewMockControlClient(ts.URL),
		Logger: &chronograf.NoopLogger{},
	}
	err := cl.Connect(context.Background(), &chronograf.Source{})
	if err != nil {
		t.Fatal("Unexpected error initializing client: err:", err)
	}
	_, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"})
	if err != nil {
		t.Fatal("Unexpected error while querying data node: err:", err)
	}
	if !called {
		t.Fatal("Expected request to data node but none was received")
	}
}
// Test_Enterprise_AdvancesDataNodes verifies round-robin load balancing:
// two queries against two data nodes must hit each node exactly once.
func Test_Enterprise_AdvancesDataNodes(t *testing.T) {
	m1 := NewMockTimeSeries("http://host-1.example.com:8086")
	m2 := NewMockTimeSeries("http://host-2.example.com:8086")
	cl, err := enterprise.NewClientWithTimeSeries(
		&chronograf.NoopLogger{},
		"http://meta.example.com:8091",
		&influx.BasicAuth{
			Username: "marty",
			Password: "thelake",
		},
		false,
		false,
		chronograf.TimeSeries(m1),
		chronograf.TimeSeries(m2))
	if err != nil {
		t.Error("Unexpected error while initializing client: err:", err)
	}
	err = cl.Connect(context.Background(), &chronograf.Source{})
	if err != nil {
		t.Error("Unexpected error while initializing client: err:", err)
	}
	_, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"})
	if err != nil {
		t.Fatal("Unexpected error while issuing query: err:", err)
	}
	_, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"})
	if err != nil {
		t.Fatal("Unexpected error while issuing query: err:", err)
	}
	if m1.QueryCtr != 1 || m2.QueryCtr != 1 {
		t.Fatalf("Expected m1.Query to be called once but was %d. Expected m2.Query to be called once but was %d\n", m1.QueryCtr, m2.QueryCtr)
	}
}
// Test_Enterprise_NewClientWithURL table-tests URL parsing in the client
// constructor: scheme variants, TLS flags, bare host:port, and invalid URLs.
func Test_Enterprise_NewClientWithURL(t *testing.T) {
	t.Parallel()
	urls := []struct {
		name               string
		url                string
		username           string
		password           string
		tls                bool
		insecureSkipVerify bool
		wantErr            bool
	}{
		{
			name: "no tls should have no error",
			url:  "http://localhost:8086",
		},
		{
			name: "tls should have no error",
			url:  "https://localhost:8086",
		},
		{
			name:     "no tls but with basic auth",
			url:      "http://localhost:8086",
			username: "username",
			password: "password",
		},
		{
			name: "tls request but url is not tls should not error",
			url:  "http://localhost:8086",
			tls:  true,
		},
		{
			name:               "https with tls and with insecureSkipVerify should not error",
			url:                "https://localhost:8086",
			tls:                true,
			insecureSkipVerify: true,
		},
		{
			name: "URL does not require http or https",
			url:  "localhost:8086",
		},
		{
			name: "URL with TLS request should not error",
			url:  "localhost:8086",
			tls:  true,
		},
		{
			name:    "invalid URL causes error",
			url:     ":http",
			wantErr: true,
		},
	}
	for _, testURL := range urls {
		_, err := enterprise.NewClientWithURL(
			testURL.url,
			&influx.BasicAuth{
				Username: testURL.username,
				Password: testURL.password,
			},
			testURL.tls,
			testURL.insecureSkipVerify,
			&chronograf.NoopLogger{})
		if err != nil && !testURL.wantErr {
			t.Errorf("Unexpected error creating Client with URL %s and TLS preference %t. err: %s", testURL.url, testURL.tls, err.Error())
		} else if err == nil && testURL.wantErr {
			t.Errorf("Expected error creating Client with URL %s and TLS preference %t", testURL.url, testURL.tls)
		}
	}
}
// Test_Enterprise_ComplainsIfNotOpened verifies that Query returns
// chronograf.ErrUninitialized when Connect has never been called.
func Test_Enterprise_ComplainsIfNotOpened(t *testing.T) {
	m1 := NewMockTimeSeries("http://host-1.example.com:8086")
	cl, err := enterprise.NewClientWithTimeSeries(
		&chronograf.NoopLogger{},
		"http://meta.example.com:8091",
		&influx.BasicAuth{
			Username: "docbrown",
			Password: "1.21 gigawatts",
		},
		false, false, chronograf.TimeSeries(m1))
	if err != nil {
		t.Error("Expected nil, but was this err:", err)
	}
	_, err = cl.Query(context.Background(), chronograf.Query{Command: "show shards", DB: "_internal", RP: "autogen"})
	if err != chronograf.ErrUninitialized {
		t.Error("Expected ErrUninitialized, but was this err:", err)
	}
}
// TestClient_Permissions pins the full set of enterprise permission strings
// returned by Client.Permissions for both the all- and per-database scopes.
func TestClient_Permissions(t *testing.T) {
	tests := []struct {
		name string
		want chronograf.Permissions
	}{
		{
			name: "All possible enterprise permissions",
			want: chronograf.Permissions{
				{
					Scope: chronograf.AllScope,
					Allowed: chronograf.Allowances{
						"NoPermissions",
						"ViewAdmin",
						"ViewChronograf",
						"CreateDatabase",
						"CreateUserAndRole",
						"AddRemoveNode",
						"DropDatabase",
						"DropData",
						"ReadData",
						"WriteData",
						"Rebalance",
						"ManageShard",
						"ManageContinuousQuery",
						"ManageQuery",
						"ManageSubscription",
						"Monitor",
						"CopyShard",
						"KapacitorAPI",
						"KapacitorConfigAPI",
					},
				},
				{
					Scope: chronograf.DBScope,
					Allowed: chronograf.Allowances{
						"NoPermissions",
						"ViewAdmin",
						"ViewChronograf",
						"CreateDatabase",
						"CreateUserAndRole",
						"AddRemoveNode",
						"DropDatabase",
						"DropData",
						"ReadData",
						"WriteData",
						"Rebalance",
						"ManageShard",
						"ManageContinuousQuery",
						"ManageQuery",
						"ManageSubscription",
						"Monitor",
						"CopyShard",
						"KapacitorAPI",
						"KapacitorConfigAPI",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		c := &enterprise.Client{}
		if got := c.Permissions(context.Background()); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. Client.Permissions() = %v, want %v", tt.name, got, tt.want)
		}
	}
}

View File

@ -1,568 +0,0 @@
package enterprise
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/influx"
)
// Shared transports for all clients to prevent leaking connections.
var (
	// skipVerifyTransport disables TLS certificate verification.
	skipVerifyTransport = &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	// defaultTransport verifies certificates normally.
	defaultTransport = &http.Transport{}
)
// client abstracts the HTTP transport used by MetaClient so it can be
// replaced in tests.
type client interface {
	Do(URL *url.URL, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error)
}
// MetaClient represents a Meta node in an Influx Enterprise cluster.
type MetaClient struct {
	URL *url.URL
	// client performs the HTTP requests (defaultClient in production).
	client client
	// authorizer adds Authorization headers to outbound requests; may be nil.
	authorizer influx.Authorizer
}
// NewMetaClient constructs a MetaClient for a meta node in an Influx
// Enterprise cluster.
func NewMetaClient(url *url.URL, InsecureSkipVerify bool, authorizer influx.Authorizer) *MetaClient {
	transport := &defaultClient{InsecureSkipVerify: InsecureSkipVerify}
	return &MetaClient{
		URL:        url,
		client:     transport,
		authorizer: authorizer,
	}
}
// jsonLDAPConfig mirrors the "structured" portion of the meta node's
// /ldap/v1/config response.
type jsonLDAPConfig struct {
	Enabled bool `json:"enabled"`
}

// LDAPConfig represents the configuration for ldap from influxdb.
type LDAPConfig struct {
	Structured jsonLDAPConfig `json:"structured"`
}
// requestLDAPChannel issues the /ldap/v1/config GET on its own goroutine and
// returns a channel that will deliver the response; a failure is sent on the
// supplied errors channel instead. Both channels must be buffered (size 1)
// so the goroutine never blocks if the caller stops listening.
func (m *MetaClient) requestLDAPChannel(ctx context.Context, errors chan error) chan *http.Response {
	channel := make(chan *http.Response, 1)
	go (func() {
		res, err := m.Do(ctx, "/ldap/v1/config", "GET", m.authorizer, nil, nil)
		if err != nil {
			errors <- err
		} else {
			channel <- res
		}
	})()
	return channel
}
// GetLDAPConfig gets the current ldap config response from influxdb
// enterprise, waiting at most two seconds for the meta node to answer.
//
// Fix: the response body is now closed — previously it leaked, pinning the
// underlying connection and preventing transport reuse.
func (m *MetaClient) GetLDAPConfig(ctx context.Context) (*LDAPConfig, error) {
	ctxt, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	errorCh := make(chan error, 1)
	responseChannel := m.requestLDAPChannel(ctxt, errorCh)
	select {
	case res := <-responseChannel:
		defer res.Body.Close()
		result, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		var config LDAPConfig
		err = json.Unmarshal(result, &config)
		if err != nil {
			return nil, err
		}
		return &config, nil
	case err := <-errorCh:
		return nil, err
	case <-ctxt.Done():
		return nil, ctxt.Err()
	}
}
// ShowCluster returns the cluster configuration (not health).
func (m *MetaClient) ShowCluster(ctx context.Context) (*Cluster, error) {
	res, err := m.Do(ctx, "/show-cluster", "GET", m.authorizer, nil, nil)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	cluster := &Cluster{}
	if err := json.NewDecoder(res.Body).Decode(cluster); err != nil {
		return nil, err
	}
	return cluster, nil
}
// Users gets all the users. If name is not nil it filters for a single user.
func (m *MetaClient) Users(ctx context.Context, name *string) (*Users, error) {
	params := map[string]string{}
	if name != nil {
		params["name"] = *name
	}
	res, err := m.Do(ctx, "/user", "GET", m.authorizer, params, nil)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	users := &Users{}
	if err := json.NewDecoder(res.Body).Decode(users); err != nil {
		return nil, err
	}
	return users, nil
}
// User returns a single Influx Enterprise user, erroring when no user with
// the given name exists.
func (m *MetaClient) User(ctx context.Context, name string) (*User, error) {
	users, err := m.Users(ctx, &name)
	if err != nil {
		return nil, err
	}
	if len(users.Users) == 0 {
		return nil, fmt.Errorf("no user found")
	}
	u := users.Users[0]
	return &u, nil
}
// CreateUser adds a user to Influx Enterprise (POST /user with the
// "create" action).
func (m *MetaClient) CreateUser(ctx context.Context, name, passwd string) error {
	return m.CreateUpdateUser(ctx, "create", name, passwd)
}
// ChangePassword updates a user's password in Influx Enterprise (POST /user
// with the "change-password" action).
func (m *MetaClient) ChangePassword(ctx context.Context, name, passwd string) error {
	return m.CreateUpdateUser(ctx, "change-password", name, passwd)
}
// CreateUpdateUser is a helper function to POST to the /user Influx
// Enterprise endpoint with the given action, user name, and password.
func (m *MetaClient) CreateUpdateUser(ctx context.Context, action, name, passwd string) error {
	payload := &UserAction{
		Action: action,
		User: &User{
			Name:     name,
			Password: passwd,
		},
	}
	return m.Post(ctx, "/user", payload, nil)
}
// DeleteUser removes a user from Influx Enterprise.
func (m *MetaClient) DeleteUser(ctx context.Context, name string) error {
	return m.Post(ctx, "/user", &UserAction{
		Action: "delete",
		User:   &User{Name: name},
	}, nil)
}
// RemoveUserPerms revokes permissions for a user in Influx Enterprise.
func (m *MetaClient) RemoveUserPerms(ctx context.Context, name string, perms Permissions) error {
	return m.Post(ctx, "/user", &UserAction{
		Action: "remove-permissions",
		User: &User{
			Name:        name,
			Permissions: perms,
		},
	}, nil)
}
// SetUserPerms removes permissions not in set and then adds the requested perms.
// The revoke must happen before the add so the user ends up with exactly the
// requested set.
func (m *MetaClient) SetUserPerms(ctx context.Context, name string, perms Permissions) error {
	user, err := m.User(ctx, name)
	if err != nil {
		return err
	}
	revoke, add := permissionsDifference(perms, user.Permissions)
	// first, revoke all the permissions the user currently has, but,
	// shouldn't...
	if len(revoke) > 0 {
		err := m.RemoveUserPerms(ctx, name, revoke)
		if err != nil {
			return err
		}
	}
	// ... next, add any permissions the user should have
	if len(add) > 0 {
		a := &UserAction{
			Action: "add-permissions",
			User: &User{
				Name:        name,
				Permissions: add,
			},
		}
		return m.Post(ctx, "/user", a, nil)
	}
	return nil
}
// UserRoles returns a map of users to all of their current roles.
func (m *MetaClient) UserRoles(ctx context.Context) (map[string]Roles, error) {
	all, err := m.Roles(ctx, nil)
	if err != nil {
		return nil, err
	}
	byUser := make(map[string]Roles)
	for _, role := range all.Roles {
		for _, username := range role.Users {
			// The zero Roles value is a valid starting point for a new user.
			entry := byUser[username]
			entry.Roles = append(entry.Roles, role)
			byUser[username] = entry
		}
	}
	return byUser, nil
}
// Roles gets all the roles. If name is not nil it filters for a single role.
func (m *MetaClient) Roles(ctx context.Context, name *string) (*Roles, error) {
	params := map[string]string{}
	if name != nil {
		params["name"] = *name
	}
	res, err := m.Do(ctx, "/role", "GET", m.authorizer, params, nil)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	roles := &Roles{}
	if err := json.NewDecoder(res.Body).Decode(roles); err != nil {
		return nil, err
	}
	return roles, nil
}
// Role returns a single named role, erroring when no such role exists.
func (m *MetaClient) Role(ctx context.Context, name string) (*Role, error) {
	roles, err := m.Roles(ctx, &name)
	if err != nil {
		return nil, err
	}
	if len(roles.Roles) == 0 {
		return nil, fmt.Errorf("no role found")
	}
	r := roles.Roles[0]
	return &r, nil
}
// CreateRole adds a role to Influx Enterprise.
func (m *MetaClient) CreateRole(ctx context.Context, name string) error {
	return m.Post(ctx, "/role", &RoleAction{
		Action: "create",
		Role:   &Role{Name: name},
	}, nil)
}
// DeleteRole removes a role from Influx Enterprise.
func (m *MetaClient) DeleteRole(ctx context.Context, name string) error {
	return m.Post(ctx, "/role", &RoleAction{
		Action: "delete",
		Role:   &Role{Name: name},
	}, nil)
}
// RemoveRolePerms revokes permissions from a role.
func (m *MetaClient) RemoveRolePerms(ctx context.Context, name string, perms Permissions) error {
	return m.Post(ctx, "/role", &RoleAction{
		Action: "remove-permissions",
		Role: &Role{
			Name:        name,
			Permissions: perms,
		},
	}, nil)
}
// SetRolePerms removes permissions not in set and then adds the requested perms to role.
// The revoke must happen before the add so the role ends up with exactly the
// requested set.
func (m *MetaClient) SetRolePerms(ctx context.Context, name string, perms Permissions) error {
	role, err := m.Role(ctx, name)
	if err != nil {
		return err
	}
	revoke, add := permissionsDifference(perms, role.Permissions)
	// first, revoke all the permissions the role currently has, but,
	// shouldn't...
	if len(revoke) > 0 {
		err := m.RemoveRolePerms(ctx, name, revoke)
		if err != nil {
			return err
		}
	}
	// ... next, add any permissions the role should have
	if len(add) > 0 {
		a := &RoleAction{
			Action: "add-permissions",
			Role: &Role{
				Name:        name,
				Permissions: add,
			},
		}
		return m.Post(ctx, "/role", a, nil)
	}
	return nil
}
// SetRoleUsers removes users not in role and then adds the requested users to role.
func (m *MetaClient) SetRoleUsers(ctx context.Context, name string, users []string) error {
	role, err := m.Role(ctx, name)
	if err != nil {
		return err
	}
	toRevoke, toAdd := Difference(users, role.Users)
	if err := m.RemoveRoleUsers(ctx, name, toRevoke); err != nil {
		return err
	}
	return m.AddRoleUsers(ctx, name, toAdd)
}
// Difference compares two sets and returns a set to be removed (present in
// haves but not wants) and a set to be added (present in wants but not haves).
//
// Fix: the first membership scan now breaks as soon as a match is found,
// matching the second scan — previously it kept iterating over the remainder
// of haves for no reason.
func Difference(wants []string, haves []string) (revoke []string, add []string) {
	for _, want := range wants {
		found := false
		for _, got := range haves {
			if want == got {
				found = true
				break
			}
		}
		if !found {
			add = append(add, want)
		}
	}
	for _, got := range haves {
		found := false
		for _, want := range wants {
			if want == got {
				found = true
				break
			}
		}
		if !found {
			revoke = append(revoke, got)
		}
	}
	return revoke, add
}
// permissionsDifference computes, per scope, the permissions to revoke and
// the permissions to add so that haves becomes wants. Scopes present only in
// wants are added wholesale; scopes present only in haves are revoked
// wholesale; shared scopes are diffed element-wise via Difference.
func permissionsDifference(wants Permissions, haves Permissions) (revoke Permissions, add Permissions) {
	revoke = make(Permissions)
	add = make(Permissions)
	for scope, want := range wants {
		have, ok := haves[scope]
		if ok {
			r, a := Difference(want, have)
			revoke[scope] = r
			add[scope] = a
		} else {
			add[scope] = want
		}
	}
	for scope, have := range haves {
		_, ok := wants[scope]
		if !ok {
			revoke[scope] = have
		}
	}
	return
}
// AddRoleUsers updates a role to have additional users. A no-op when the
// user list is empty, since the role is already in the desired state.
func (m *MetaClient) AddRoleUsers(ctx context.Context, name string, users []string) error {
	if len(users) == 0 {
		return nil
	}
	return m.Post(ctx, "/role", &RoleAction{
		Action: "add-users",
		Role: &Role{
			Name:  name,
			Users: users,
		},
	}, nil)
}
// RemoveRoleUsers updates a role to remove some users. A no-op when the
// user list is empty, since the role is already in the desired state.
func (m *MetaClient) RemoveRoleUsers(ctx context.Context, name string, users []string) error {
	if len(users) == 0 {
		return nil
	}
	return m.Post(ctx, "/role", &RoleAction{
		Action: "remove-users",
		Role: &Role{
			Name:  name,
			Users: users,
		},
	}, nil)
}
// Post is a helper function to POST a JSON-encoded action to Influx Enterprise.
func (m *MetaClient) Post(ctx context.Context, path string, action interface{}, params map[string]string) error {
	payload, err := json.Marshal(action)
	if err != nil {
		return err
	}
	if _, err := m.Do(ctx, path, "POST", m.authorizer, params, bytes.NewReader(payload)); err != nil {
		return err
	}
	return nil
}
// defaultClient is the production HTTP implementation of the client
// interface used by MetaClient.
type defaultClient struct {
	// Leader caches the host of the cluster leader, learned either from the
	// first request or from a redirect (see AuthedCheckRedirect).
	Leader             string
	InsecureSkipVerify bool
}
// Do is a helper function to interface with Influx Enterprise's Meta API.
// It sends the request to the cached leader host (learning it from the first
// call), decodes non-200 responses into an Error, and returns the raw
// response otherwise (caller closes the body on success).
func (d *defaultClient) Do(URL *url.URL, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) {
	p := url.Values{}
	for k, v := range params {
		p.Add(k, v)
	}
	// NOTE(review): this mutates the caller's URL in place (path, query, and
	// possibly host) — confirm callers do not rely on it being unchanged.
	URL.Path = path
	URL.RawQuery = p.Encode()
	// First request establishes the leader; later requests are rewritten to
	// whatever leader AuthedCheckRedirect discovered.
	if d.Leader == "" {
		d.Leader = URL.Host
	} else if d.Leader != URL.Host {
		URL.Host = d.Leader
	}
	req, err := http.NewRequest(method, URL.String(), body)
	if err != nil {
		return nil, err
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	if authorizer != nil {
		if err = authorizer.Set(req); err != nil {
			return nil, err
		}
	}
	// Meta servers will redirect (307) to leader. We need
	// special handling to preserve authentication headers.
	client := &http.Client{
		CheckRedirect: d.AuthedCheckRedirect,
	}
	if d.InsecureSkipVerify {
		client.Transport = skipVerifyTransport
	} else {
		client.Transport = defaultTransport
	}
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != http.StatusOK {
		// Non-200: decode the server's error payload and surface it.
		defer res.Body.Close()
		dec := json.NewDecoder(res.Body)
		out := &Error{}
		err = dec.Decode(out)
		if err != nil {
			return nil, err
		}
		return nil, errors.New(out.Error)
	}
	return res, nil
}
// AuthedCheckRedirect tries to follow the Influx Enterprise pattern of
// redirecting to the leader but preserving authentication headers.
func (d *defaultClient) AuthedCheckRedirect(req *http.Request, via []*http.Request) error {
	switch {
	case len(via) >= 10:
		return errors.New("too many redirects")
	case len(via) == 0:
		return nil
	}

	// Copy the Authorization header from the original request onto the
	// redirected one (Go's client drops it across redirects).
	const preserve = "Authorization"
	if auth, ok := via[0].Header[preserve]; ok {
		req.Header[preserve] = auth
	}
	// Remember the redirect target as the new leader.
	d.Leader = req.URL.Host
	return nil
}
// Do is a cancelable function to interface with Influx Enterprise's Meta API.
// It runs the underlying client call in a goroutine and returns early with
// chronograf.ErrUpstreamTimeout when ctx is canceled first.
func (m *MetaClient) Do(ctx context.Context, path, method string, authorizer influx.Authorizer, params map[string]string, body io.Reader) (*http.Response, error) {
	type result struct {
		Response *http.Response
		Err      error
	}

	// Fix: buffer the channel so the goroutine can always complete its send
	// and exit even when ctx is canceled before the request finishes. With an
	// unbuffered channel the goroutine blocked forever on the send once the
	// select below took the ctx.Done() branch, leaking one goroutine (and its
	// in-flight response) per canceled call.
	resps := make(chan result, 1)
	go func() {
		resp, err := m.client.Do(m.URL, path, method, authorizer, params, body)
		resps <- result{resp, err}
	}()

	select {
	case resp := <-resps:
		return resp.Response, resp.Err
	case <-ctx.Done():
		return nil, chronograf.ErrUpstreamTimeout
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,142 +0,0 @@
package enterprise_test
import (
"context"
"encoding/json"
"net/url"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/enterprise"
)
// ControlClient is a test double for the enterprise control client. It serves
// a canned Cluster and records whether ShowCluster was invoked.
type ControlClient struct {
	// Cluster is returned verbatim from ShowCluster.
	Cluster *enterprise.Cluster
	// ShowClustersCalled is set to true the first time ShowCluster runs.
	ShowClustersCalled bool
}
// NewMockControlClient returns a ControlClient whose cluster contains a single
// data node at addr. It panics when addr is not a parseable URL, which is
// acceptable because it is only called from tests.
func NewMockControlClient(addr string) *ControlClient {
	_, err := url.Parse(addr)
	if err != nil {
		panic(err)
	}

	return &ControlClient{
		Cluster: &enterprise.Cluster{
			DataNodes: []enterprise.DataNode{
				// Element type is implied by the slice literal; repeating
				// enterprise.DataNode here is flagged by gofmt -s / vet.
				{
					HTTPAddr: addr,
				},
			},
		},
	}
}
// ShowCluster records that it was called and returns the canned cluster.
func (cc *ControlClient) ShowCluster(context.Context) (*enterprise.Cluster, error) {
	cc.ShowClustersCalled = true
	return cc.Cluster, nil
}

// The remaining methods are no-op stubs that satisfy the control-client
// interface for tests that don't inspect their results.
// Fix: UserRoles and Roles previously used receiver name ccm while every
// other method used cc; Go convention is one consistent receiver name per type.

func (cc *ControlClient) User(ctx context.Context, name string) (*enterprise.User, error) {
	return nil, nil
}

func (cc *ControlClient) CreateUser(ctx context.Context, name, passwd string) error {
	return nil
}

func (cc *ControlClient) DeleteUser(ctx context.Context, name string) error {
	return nil
}

func (cc *ControlClient) ChangePassword(ctx context.Context, name, passwd string) error {
	return nil
}

func (cc *ControlClient) Users(ctx context.Context, name *string) (*enterprise.Users, error) {
	return nil, nil
}

func (cc *ControlClient) SetUserPerms(ctx context.Context, name string, perms enterprise.Permissions) error {
	return nil
}

func (cc *ControlClient) CreateRole(ctx context.Context, name string) error {
	return nil
}

func (cc *ControlClient) Role(ctx context.Context, name string) (*enterprise.Role, error) {
	return nil, nil
}

func (cc *ControlClient) UserRoles(ctx context.Context) (map[string]enterprise.Roles, error) {
	return nil, nil
}

func (cc *ControlClient) Roles(ctx context.Context, name *string) (*enterprise.Roles, error) {
	return nil, nil
}

func (cc *ControlClient) DeleteRole(ctx context.Context, name string) error {
	return nil
}

func (cc *ControlClient) SetRolePerms(ctx context.Context, name string, perms enterprise.Permissions) error {
	return nil
}

func (cc *ControlClient) SetRoleUsers(ctx context.Context, name string, users []string) error {
	return nil
}

func (cc *ControlClient) AddRoleUsers(ctx context.Context, name string, users []string) error {
	return nil
}

func (cc *ControlClient) RemoveRoleUsers(ctx context.Context, name string, users []string) error {
	return nil
}
// TimeSeries is a test double for a time-series data source. It records how
// many queries were issued and returns a canned Response.
type TimeSeries struct {
	// URLs are the endpoints this mock pretends to represent.
	URLs []string
	// Response is the canned payload (currently unused by Query, which
	// constructs a fresh &Response{}).
	Response Response
	// QueryCtr counts calls to Query.
	QueryCtr int
}
type Response struct{}
func (r *Response) MarshalJSON() ([]byte, error) {
return json.Marshal(r)
}
// Query counts the call and returns an empty canned response.
func (ts *TimeSeries) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {
	ts.QueryCtr++
	return &Response{}, nil
}

// Connect is a no-op; the mock is always "connected".
func (ts *TimeSeries) Connect(ctx context.Context, src *chronograf.Source) error {
	return nil
}

// Write discards all points.
func (ts *TimeSeries) Write(ctx context.Context, points []chronograf.Point) error {
	return nil
}

// Users returns nil; callers exercising user stores should not use this mock.
func (ts *TimeSeries) Users(ctx context.Context) chronograf.UsersStore {
	return nil
}

// Roles returns nil with no error.
func (ts *TimeSeries) Roles(ctx context.Context) (chronograf.RolesStore, error) {
	return nil, nil
}

// Permissions returns an empty permission set.
func (ts *TimeSeries) Permissions(ctx context.Context) chronograf.Permissions {
	return chronograf.Permissions{}
}

// NewMockTimeSeries builds a TimeSeries mock for the given endpoint URLs.
func NewMockTimeSeries(urls ...string) *TimeSeries {
	return &TimeSeries{
		URLs:     urls,
		Response: Response{},
	}
}

View File

@ -1,113 +0,0 @@
package enterprise
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
// RolesStore uses a control client to operate on Influx Enterprise roles.
// Roles are groups of permissions applied to groups of users.
type RolesStore struct {
	Ctrl // embedded control-client interface; all role operations go through it
	Logger chronograf.Logger
}
// Add creates a new Role in Influx Enterprise.
// This must be done in three smaller steps: creating the role, setting its
// permissions, and setting its users. Any step failing aborts the rest.
func (c *RolesStore) Add(ctx context.Context, u *chronograf.Role) (*chronograf.Role, error) {
	if err := c.Ctrl.CreateRole(ctx, u.Name); err != nil {
		return nil, err
	}
	if err := c.Ctrl.SetRolePerms(ctx, u.Name, ToEnterprise(u.Permissions)); err != nil {
		return nil, err
	}

	// The control API takes bare user names, not chronograf.User values.
	names := make([]string, len(u.Users))
	for i, user := range u.Users {
		names[i] = user.Name
	}
	if err := c.Ctrl.SetRoleUsers(ctx, u.Name, names); err != nil {
		return nil, err
	}
	return u, nil
}
// Delete removes the named Role from Influx Enterprise via the control client.
func (c *RolesStore) Delete(ctx context.Context, u *chronograf.Role) error {
	return c.Ctrl.DeleteRole(ctx, u.Name)
}
// Get retrieves a Role if name exists, hydrating each member user's
// permissions via a per-user control-client lookup.
func (c *RolesStore) Get(ctx context.Context, name string) (*chronograf.Role, error) {
	role, err := c.Ctrl.Role(ctx, name)
	if err != nil {
		return nil, err
	}

	// Hydrate all the users to gather their permissions.
	members := make([]chronograf.User, len(role.Users))
	for i, username := range role.Users {
		user, err := c.Ctrl.User(ctx, username)
		if err != nil {
			return nil, err
		}
		members[i] = chronograf.User{
			Name:        user.Name,
			Permissions: ToChronograf(user.Permissions),
		}
	}

	return &chronograf.Role{
		Name:        role.Name,
		Permissions: ToChronograf(role.Permissions),
		Users:       members,
	}, nil
}
// Update applies the Role's permissions and/or users. A nil Permissions or
// nil Users slice means "leave that aspect unchanged".
func (c *RolesStore) Update(ctx context.Context, u *chronograf.Role) error {
	if u.Permissions != nil {
		if err := c.Ctrl.SetRolePerms(ctx, u.Name, ToEnterprise(u.Permissions)); err != nil {
			return err
		}
	}
	if u.Users == nil {
		return nil
	}

	// The control API takes bare user names.
	names := make([]string, len(u.Users))
	for i, user := range u.Users {
		names[i] = user.Name
	}
	return c.Ctrl.SetRoleUsers(ctx, u.Name, names)
}
// All returns every Role defined in Influx Enterprise, converted to the
// chronograf representation.
func (c *RolesStore) All(ctx context.Context) ([]chronograf.Role, error) {
	roles, err := c.Ctrl.Roles(ctx, nil)
	if err != nil {
		return nil, err
	}
	return roles.ToChronograf(), nil
}
// ToChronograf converts enterprise roles to the chronograf representation.
// Member names become chronograf.User values with only the Name populated.
func (r *Roles) ToChronograf() []chronograf.Role {
	out := make([]chronograf.Role, len(r.Roles))
	for i, role := range r.Roles {
		members := make([]chronograf.User, len(role.Users))
		// Distinct index name here; the original shadowed the outer i.
		for j, name := range role.Users {
			members[j] = chronograf.User{
				Name: name,
			}
		}
		out[i] = chronograf.Role{
			Name:        role.Name,
			Permissions: ToChronograf(role.Permissions),
			Users:       members,
		}
	}
	return out
}

View File

@ -1,32 +0,0 @@
package enterprise
import (
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestRoles_ToChronograf verifies the enterprise→chronograf role conversion;
// currently it only pins the empty-set case (empty in, empty out).
func TestRoles_ToChronograf(t *testing.T) {
	tests := []struct {
		name  string
		roles []Role
		want  []chronograf.Role
	}{
		{
			name:  "empty roles",
			roles: []Role{},
			want:  []chronograf.Role{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &Roles{
				Roles: tt.roles,
			}
			if got := r.ToChronograf(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Roles.ToChronograf() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -1,71 +0,0 @@
package enterprise
// Cluster is a collection of data nodes and non-data nodes within a
// Plutonium cluster.
type Cluster struct {
	DataNodes []DataNode `json:"data"`
	MetaNodes []Node     `json:"meta"`
}

// DataNode represents a data node in an Influx Enterprise Cluster
type DataNode struct {
	ID         uint64 `json:"id"`               // Meta store ID.
	TCPAddr    string `json:"tcpAddr"`          // RPC addr, e.g., host:8088.
	HTTPAddr   string `json:"httpAddr"`         // Client addr, e.g., host:8086.
	HTTPScheme string `json:"httpScheme"`       // "http" or "https" for HTTP addr.
	Status     string `json:"status,omitempty"` // The cluster status of the node.
}

// Node represent any meta or data node in an Influx Enterprise cluster
type Node struct {
	ID         uint64 `json:"id"`
	Addr       string `json:"addr"`
	HTTPScheme string `json:"httpScheme"`
	TCPAddr    string `json:"tcpAddr"`
}

// Permissions maps resources to a set of permissions.
// Specifically, it maps a database to a set of permissions
// (the empty-string key denotes "all databases" — see ToEnterprise/ToChronograf).
type Permissions map[string][]string

// User represents an enterprise user.
type User struct {
	Name        string      `json:"name"`
	Password    string      `json:"password,omitempty"`
	Permissions Permissions `json:"permissions,omitempty"`
}

// Users represents a set of enterprise users.
type Users struct {
	Users []User `json:"users,omitempty"`
}

// UserAction represents an action to be taken with a user.
type UserAction struct {
	Action string `json:"action"`
	User   *User  `json:"user"`
}

// Role is a restricted set of permissions assigned to a set of users.
type Role struct {
	Name        string      `json:"name"`
	NewName     string      `json:"newName,omitempty"`
	Permissions Permissions `json:"permissions,omitempty"`
	Users       []string    `json:"users,omitempty"`
}

// Roles is a set of roles
type Roles struct {
	Roles []Role `json:"roles,omitempty"`
}

// RoleAction represents an action to be taken with a role.
type RoleAction struct {
	Action string `json:"action"`
	Role   *Role  `json:"role"`
}

// Error is JSON error message return by Influx Enterprise's meta API.
type Error struct {
	Error string `json:"error"`
}

View File

@ -1,197 +0,0 @@
package enterprise
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// UserStore uses a control client to operate on Influx Enterprise users.
type UserStore struct {
	Ctrl // embedded control-client interface; all user operations go through it
	Logger chronograf.Logger
}
// Add creates a new User in Influx Enterprise: create the account, set its
// permissions, enroll it in each requested role, then re-read the user so the
// returned value reflects what the server actually stored.
func (c *UserStore) Add(ctx context.Context, u *chronograf.User) (*chronograf.User, error) {
	if err := c.Ctrl.CreateUser(ctx, u.Name, u.Passwd); err != nil {
		return nil, err
	}
	if err := c.Ctrl.SetUserPerms(ctx, u.Name, ToEnterprise(u.Permissions)); err != nil {
		return nil, err
	}
	for _, role := range u.Roles {
		if err := c.Ctrl.AddRoleUsers(ctx, role.Name, []string{u.Name}); err != nil {
			return nil, err
		}
	}
	return c.Get(ctx, chronograf.UserQuery{Name: &u.Name})
}
// Delete removes the user by name from Influx Enterprise via the control client.
func (c *UserStore) Delete(ctx context.Context, u *chronograf.User) error {
	return c.Ctrl.DeleteUser(ctx, u.Name)
}
// Num reports the number of users in Influx by listing them all and counting.
func (c *UserStore) Num(ctx context.Context) (int, error) {
	users, err := c.All(ctx)
	if err != nil {
		return 0, err
	}
	return len(users), nil
}
// Get retrieves a user by name (q.Name is required), combining the user's own
// permissions with the roles it belongs to.
func (c *UserStore) Get(ctx context.Context, q chronograf.UserQuery) (*chronograf.User, error) {
	if q.Name == nil {
		return nil, fmt.Errorf("query must specify name")
	}

	u, err := c.Ctrl.User(ctx, *q.Name)
	if err != nil {
		return nil, err
	}

	ur, err := c.Ctrl.UserRoles(ctx)
	if err != nil {
		return nil, err
	}

	userRoles := ur[*q.Name]
	roles := userRoles.ToChronograf()
	// For now we are removing all users from a role being returned.
	for i := range roles {
		roles[i].Users = []chronograf.User{}
	}

	return &chronograf.User{
		Name:        u.Name,
		Permissions: ToChronograf(u.Permissions),
		Roles:       roles,
	}, nil
}
// Update the user's permissions or roles.
// Only one type of change is applied at a time: a non-empty Passwd wins and
// returns immediately; otherwise roles are reconciled, then permissions set.
func (c *UserStore) Update(ctx context.Context, u *chronograf.User) error {
	// Only allow one type of change at a time. If it is a password
	// change then do it and return without any changes to permissions
	if u.Passwd != "" {
		return c.Ctrl.ChangePassword(ctx, u.Name, u.Passwd)
	}

	if u.Roles != nil {
		// Make a list of the roles we want this user to have:
		want := make([]string, len(u.Roles))
		for i, r := range u.Roles {
			want[i] = r.Name
		}

		// Find the list of all roles this user is currently in
		userRoles, err := c.UserRoles(ctx)
		if err != nil {
			// Fix: previously `return nil` here, silently swallowing the
			// error and reporting success without reconciling any roles.
			return err
		}
		// Make a list of the roles the user currently has
		roles := userRoles[u.Name]
		have := make([]string, len(roles.Roles))
		for i, r := range roles.Roles {
			have[i] = r.Name
		}

		// Calculate the roles the user will be removed from and the roles the user
		// will be added to.
		revoke, add := Difference(want, have)

		// First, add the user to the new roles
		for _, role := range add {
			if err := c.Ctrl.AddRoleUsers(ctx, role, []string{u.Name}); err != nil {
				return err
			}
		}

		// ... and now remove the user from any extra roles
		for _, role := range revoke {
			if err := c.Ctrl.RemoveRoleUsers(ctx, role, []string{u.Name}); err != nil {
				return err
			}
		}
	}

	if u.Permissions != nil {
		return c.Ctrl.SetUserPerms(ctx, u.Name, ToEnterprise(u.Permissions))
	}
	return nil
}
// All lists every user in Influx, attaching each user's roles (with role
// membership stripped, matching Get's behavior).
func (c *UserStore) All(ctx context.Context) ([]chronograf.User, error) {
	all, err := c.Ctrl.Users(ctx, nil)
	if err != nil {
		return nil, err
	}

	ur, err := c.Ctrl.UserRoles(ctx)
	if err != nil {
		return nil, err
	}

	res := make([]chronograf.User, len(all.Users))
	for i, user := range all.Users {
		role := ur[user.Name]
		cr := role.ToChronograf()
		// For now we are removing all users from a role being returned.
		// (Distinct index j; the original shadowed the outer i.)
		for j := range cr {
			cr[j].Users = []chronograf.User{}
		}
		res[i] = chronograf.User{
			Name:        user.Name,
			Permissions: ToChronograf(user.Permissions),
			Roles:       cr,
		}
	}
	return res, nil
}
// ToEnterprise converts the chronograf permission shape to the enterprise
// map shape. The all-databases scope maps to the empty-string key.
func ToEnterprise(perms chronograf.Permissions) Permissions {
	out := Permissions{}
	for _, p := range perms {
		key := p.Name
		if p.Scope == chronograf.AllScope {
			// Enterprise uses empty string as the key for all databases
			key = ""
		}
		out[key] = p.Allowed
	}
	return out
}
// ToChronograf converts the enterprise permissions map to the chronograf
// slice shape. The empty-string key becomes the all-databases scope.
func ToChronograf(perms Permissions) chronograf.Permissions {
	res := chronograf.Permissions{}
	for db, allowed := range perms {
		p := chronograf.Permission{
			Scope:   chronograf.DBScope,
			Name:    db,
			Allowed: allowed,
		}
		// Enterprise uses empty string as the key for all databases
		if db == "" {
			p = chronograf.Permission{
				Scope:   chronograf.AllScope,
				Allowed: allowed,
			}
		}
		res = append(res, p)
	}
	return res
}

View File

@ -1,866 +0,0 @@
package enterprise_test
import (
"context"
"fmt"
"reflect"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/enterprise"
)
func TestClient_Add(t *testing.T) {
type fields struct {
Ctrl *mockCtrl
Logger chronograf.Logger
}
type args struct {
ctx context.Context
u *chronograf.User
}
tests := []struct {
name string
fields fields
args args
want *chronograf.User
wantErr bool
}{
{
name: "Successful Create User",
fields: fields{
Ctrl: &mockCtrl{
createUser: func(ctx context.Context, name, passwd string) error {
return nil
},
setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error {
return nil
},
user: func(ctx context.Context, name string) (*enterprise.User, error) {
return &enterprise.User{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Passwd: "johnny be good",
},
},
want: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{},
},
},
{
name: "Successful Create User with roles",
fields: fields{
Ctrl: &mockCtrl{
createUser: func(ctx context.Context, name, passwd string) error {
return nil
},
setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error {
return nil
},
user: func(ctx context.Context, name string) (*enterprise.User, error) {
return &enterprise.User{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{
"marty": enterprise.Roles{
Roles: []enterprise.Role{
{
Name: "admin",
},
},
},
}, nil
},
addRoleUsers: func(ctx context.Context, name string, users []string) error {
return nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Passwd: "johnny be good",
Roles: []chronograf.Role{
{
Name: "admin",
},
},
},
},
want: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{
{
Name: "admin",
Users: []chronograf.User{},
Permissions: chronograf.Permissions{},
},
},
},
},
{
name: "Failure to Create User",
fields: fields{
Ctrl: &mockCtrl{
createUser: func(ctx context.Context, name, passwd string) error {
return fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?")
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Passwd: "johnny be good",
},
},
wantErr: true,
},
}
for _, tt := range tests {
c := &enterprise.UserStore{
Ctrl: tt.fields.Ctrl,
Logger: tt.fields.Logger,
}
got, err := c.Add(tt.args.ctx, tt.args.u)
if (err != nil) != tt.wantErr {
t.Errorf("%q. Client.Add() error = %v, wantErr %v", tt.name, err, tt.wantErr)
continue
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%q. Client.Add() = \n%#v\n, want \n%#v\n", tt.name, got, tt.want)
}
}
}
// TestClient_Delete exercises UserStore.Delete for the success path and a
// control-client failure; only the error/no-error outcome is asserted.
func TestClient_Delete(t *testing.T) {
	type fields struct {
		Ctrl   *mockCtrl
		Logger chronograf.Logger
	}
	type args struct {
		ctx context.Context
		u   *chronograf.User
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "Successful Delete User",
			fields: fields{
				Ctrl: &mockCtrl{
					deleteUser: func(ctx context.Context, name string) error {
						return nil
					},
				},
			},
			args: args{
				ctx: context.Background(),
				u: &chronograf.User{
					Name:   "marty",
					Passwd: "johnny be good",
				},
			},
		},
		{
			name: "Failure to Delete User",
			fields: fields{
				Ctrl: &mockCtrl{
					deleteUser: func(ctx context.Context, name string) error {
						return fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?")
					},
				},
			},
			args: args{
				ctx: context.Background(),
				u: &chronograf.User{
					Name:   "marty",
					Passwd: "johnny be good",
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		c := &enterprise.UserStore{
			Ctrl:   tt.fields.Ctrl,
			Logger: tt.fields.Logger,
		}
		if err := c.Delete(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr {
			t.Errorf("%q. Client.Delete() error = %v, wantErr %v", tt.name, err, tt.wantErr)
		}
	}
}
func TestClient_Get(t *testing.T) {
type fields struct {
Ctrl *mockCtrl
Logger chronograf.Logger
}
type args struct {
ctx context.Context
name string
}
tests := []struct {
name string
fields fields
args args
want *chronograf.User
wantErr bool
}{
{
name: "Successful Get User",
fields: fields{
Ctrl: &mockCtrl{
user: func(ctx context.Context, name string) (*enterprise.User, error) {
return &enterprise.User{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
name: "marty",
},
want: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{},
},
},
{
name: "Successful Get User with roles",
fields: fields{
Ctrl: &mockCtrl{
user: func(ctx context.Context, name string) (*enterprise.User, error) {
return &enterprise.User{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{
"marty": enterprise.Roles{
Roles: []enterprise.Role{
{
Name: "timetravels",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
Users: []string{"marty", "docbrown"},
},
},
},
}, nil
},
},
},
args: args{
ctx: context.Background(),
name: "marty",
},
want: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{
{
Name: "timetravels",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Users: []chronograf.User{},
},
},
},
},
{
name: "Failure to get User",
fields: fields{
Ctrl: &mockCtrl{
user: func(ctx context.Context, name string) (*enterprise.User, error) {
return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?")
},
},
},
args: args{
ctx: context.Background(),
name: "marty",
},
wantErr: true,
},
}
for _, tt := range tests {
c := &enterprise.UserStore{
Ctrl: tt.fields.Ctrl,
Logger: tt.fields.Logger,
}
got, err := c.Get(tt.args.ctx, chronograf.UserQuery{Name: &tt.args.name})
if (err != nil) != tt.wantErr {
t.Errorf("%q. Client.Get() error = %v, wantErr %v", tt.name, err, tt.wantErr)
continue
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%q. Client.Get() = %v, want %v", tt.name, got, tt.want)
}
}
}
func TestClient_Update(t *testing.T) {
type fields struct {
Ctrl *mockCtrl
Logger chronograf.Logger
}
type args struct {
ctx context.Context
u *chronograf.User
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "Successful Change Password",
fields: fields{
Ctrl: &mockCtrl{
changePassword: func(ctx context.Context, name, passwd string) error {
return nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Passwd: "johnny be good",
},
},
},
{
name: "Failure to Change Password",
fields: fields{
Ctrl: &mockCtrl{
changePassword: func(ctx context.Context, name, passwd string) error {
return fmt.Errorf("ronald Reagan, the actor?! Ha Then whos Vice President Jerry Lewis? I suppose Jane Wyman is First Lady")
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Passwd: "johnny be good",
},
},
wantErr: true,
},
{
name: "Success setting permissions User",
fields: fields{
Ctrl: &mockCtrl{
setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error {
return nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"},
},
},
},
},
wantErr: false,
},
{
name: "Success setting permissions and roles for user",
fields: fields{
Ctrl: &mockCtrl{
setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error {
return nil
},
addRoleUsers: func(ctx context.Context, name string, users []string) error {
return nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"},
},
},
Roles: []chronograf.Role{
{
Name: "adminrole",
},
},
},
},
wantErr: false,
},
{
name: "Failure setting permissions User",
fields: fields{
Ctrl: &mockCtrl{
setUserPerms: func(ctx context.Context, name string, perms enterprise.Permissions) error {
return fmt.Errorf("they found me, I don't know how, but they found me.")
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
u: &chronograf.User{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"},
},
},
},
},
wantErr: true,
},
}
for _, tt := range tests {
c := &enterprise.UserStore{
Ctrl: tt.fields.Ctrl,
Logger: tt.fields.Logger,
}
if err := c.Update(tt.args.ctx, tt.args.u); (err != nil) != tt.wantErr {
t.Errorf("%q. Client.Update() error = %v, wantErr %v", tt.name, err, tt.wantErr)
}
}
}
func TestClient_Num(t *testing.T) {
type fields struct {
Ctrl *mockCtrl
Logger chronograf.Logger
}
type args struct {
ctx context.Context
}
tests := []struct {
name string
fields fields
args args
want []chronograf.User
wantErr bool
}{
{
name: "Successful Get User",
fields: fields{
Ctrl: &mockCtrl{
users: func(ctx context.Context, name *string) (*enterprise.Users, error) {
return &enterprise.Users{
Users: []enterprise.User{
{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
},
want: []chronograf.User{
{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{},
},
},
},
{
name: "Failure to get User",
fields: fields{
Ctrl: &mockCtrl{
users: func(ctx context.Context, name *string) (*enterprise.Users, error) {
return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?")
},
},
},
args: args{
ctx: context.Background(),
},
wantErr: true,
},
}
for _, tt := range tests {
c := &enterprise.UserStore{
Ctrl: tt.fields.Ctrl,
Logger: tt.fields.Logger,
}
got, err := c.Num(tt.args.ctx)
if (err != nil) != tt.wantErr {
t.Errorf("%q. Client.Num() error = %v, wantErr %v", tt.name, err, tt.wantErr)
continue
}
if got != len(tt.want) {
t.Errorf("%q. Client.Num() = %v, want %v", tt.name, got, len(tt.want))
}
}
}
func TestClient_All(t *testing.T) {
type fields struct {
Ctrl *mockCtrl
Logger chronograf.Logger
}
type args struct {
ctx context.Context
}
tests := []struct {
name string
fields fields
args args
want []chronograf.User
wantErr bool
}{
{
name: "Successful Get User",
fields: fields{
Ctrl: &mockCtrl{
users: func(ctx context.Context, name *string) (*enterprise.Users, error) {
return &enterprise.Users{
Users: []enterprise.User{
{
Name: "marty",
Password: "johnny be good",
Permissions: map[string][]string{
"": {
"ViewChronograf",
"ReadData",
"WriteData",
},
},
},
},
}, nil
},
userRoles: func(ctx context.Context) (map[string]enterprise.Roles, error) {
return map[string]enterprise.Roles{}, nil
},
},
},
args: args{
ctx: context.Background(),
},
want: []chronograf.User{
{
Name: "marty",
Permissions: chronograf.Permissions{
{
Scope: chronograf.AllScope,
Allowed: chronograf.Allowances{"ViewChronograf", "ReadData", "WriteData"},
},
},
Roles: []chronograf.Role{},
},
},
},
{
name: "Failure to get User",
fields: fields{
Ctrl: &mockCtrl{
users: func(ctx context.Context, name *string) (*enterprise.Users, error) {
return nil, fmt.Errorf("1.21 Gigawatts! Tom, how could I have been so careless?")
},
},
},
args: args{
ctx: context.Background(),
},
wantErr: true,
},
}
for _, tt := range tests {
c := &enterprise.UserStore{
Ctrl: tt.fields.Ctrl,
Logger: tt.fields.Logger,
}
got, err := c.All(tt.args.ctx)
if (err != nil) != tt.wantErr {
t.Errorf("%q. Client.All() error = %v, wantErr %v", tt.name, err, tt.wantErr)
continue
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%q. Client.All() = %v, want %v", tt.name, got, tt.want)
}
}
}
// Test_ToEnterprise pins the chronograf→enterprise permission conversion:
// AllScope maps to the empty-string key, DBScope maps to the database name.
func Test_ToEnterprise(t *testing.T) {
	tests := []struct {
		name  string
		perms chronograf.Permissions
		want  enterprise.Permissions
	}{
		{
			name: "All Scopes",
			want: enterprise.Permissions{"": []string{"ViewChronograf", "KapacitorAPI"}},
			perms: chronograf.Permissions{
				{
					Scope:   chronograf.AllScope,
					Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"},
				},
			},
		},
		{
			name: "DB Scope",
			want: enterprise.Permissions{"telegraf": []string{"ReadData", "WriteData"}},
			perms: chronograf.Permissions{
				{
					Scope:   chronograf.DBScope,
					Name:    "telegraf",
					Allowed: chronograf.Allowances{"ReadData", "WriteData"},
				},
			},
		},
	}
	for _, tt := range tests {
		if got := enterprise.ToEnterprise(tt.perms); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. ToEnterprise() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// Test_ToChronograf pins the enterprise→chronograf permission conversion:
// the empty-string key maps to AllScope, named keys map to DBScope.
func Test_ToChronograf(t *testing.T) {
	tests := []struct {
		name  string
		perms enterprise.Permissions
		want  chronograf.Permissions
	}{
		{
			name:  "All Scopes",
			perms: enterprise.Permissions{"": []string{"ViewChronograf", "KapacitorAPI"}},
			want: chronograf.Permissions{
				{
					Scope:   chronograf.AllScope,
					Allowed: chronograf.Allowances{"ViewChronograf", "KapacitorAPI"},
				},
			},
		},
		{
			name:  "DB Scope",
			perms: enterprise.Permissions{"telegraf": []string{"ReadData", "WriteData"}},
			want: chronograf.Permissions{
				{
					Scope:   chronograf.DBScope,
					Name:    "telegraf",
					Allowed: chronograf.Allowances{"ReadData", "WriteData"},
				},
			},
		},
	}
	for _, tt := range tests {
		if got := enterprise.ToChronograf(tt.perms); !reflect.DeepEqual(got, tt.want) {
			t.Errorf("%q. toChronograf() = %v, want %v", tt.name, got, tt.want)
		}
	}
}
// mockCtrl is a function-field fake of the enterprise control-client
// interface. Each method delegates to the matching field, so a test stubs
// only the calls it expects; calling an unset field panics (nil func).
type mockCtrl struct {
	showCluster     func(ctx context.Context) (*enterprise.Cluster, error)
	user            func(ctx context.Context, name string) (*enterprise.User, error)
	createUser      func(ctx context.Context, name, passwd string) error
	deleteUser      func(ctx context.Context, name string) error
	changePassword  func(ctx context.Context, name, passwd string) error
	users           func(ctx context.Context, name *string) (*enterprise.Users, error)
	setUserPerms    func(ctx context.Context, name string, perms enterprise.Permissions) error
	userRoles       func(ctx context.Context) (map[string]enterprise.Roles, error)
	roles           func(ctx context.Context, name *string) (*enterprise.Roles, error)
	role            func(ctx context.Context, name string) (*enterprise.Role, error)
	createRole      func(ctx context.Context, name string) error
	deleteRole      func(ctx context.Context, name string) error
	setRolePerms    func(ctx context.Context, name string, perms enterprise.Permissions) error
	setRoleUsers    func(ctx context.Context, name string, users []string) error
	addRoleUsers    func(ctx context.Context, name string, users []string) error
	removeRoleUsers func(ctx context.Context, name string, users []string) error
}
// The methods below forward straight to the corresponding stub field.

func (m *mockCtrl) ShowCluster(ctx context.Context) (*enterprise.Cluster, error) {
	return m.showCluster(ctx)
}

func (m *mockCtrl) User(ctx context.Context, name string) (*enterprise.User, error) {
	return m.user(ctx, name)
}

func (m *mockCtrl) CreateUser(ctx context.Context, name, passwd string) error {
	return m.createUser(ctx, name, passwd)
}

func (m *mockCtrl) DeleteUser(ctx context.Context, name string) error {
	return m.deleteUser(ctx, name)
}

func (m *mockCtrl) ChangePassword(ctx context.Context, name, passwd string) error {
	return m.changePassword(ctx, name, passwd)
}

func (m *mockCtrl) Users(ctx context.Context, name *string) (*enterprise.Users, error) {
	return m.users(ctx, name)
}

func (m *mockCtrl) SetUserPerms(ctx context.Context, name string, perms enterprise.Permissions) error {
	return m.setUserPerms(ctx, name, perms)
}

func (m *mockCtrl) UserRoles(ctx context.Context) (map[string]enterprise.Roles, error) {
	return m.userRoles(ctx)
}

func (m *mockCtrl) Roles(ctx context.Context, name *string) (*enterprise.Roles, error) {
	return m.roles(ctx, name)
}

func (m *mockCtrl) Role(ctx context.Context, name string) (*enterprise.Role, error) {
	return m.role(ctx, name)
}

func (m *mockCtrl) CreateRole(ctx context.Context, name string) error {
	return m.createRole(ctx, name)
}

func (m *mockCtrl) DeleteRole(ctx context.Context, name string) error {
	return m.deleteRole(ctx, name)
}

func (m *mockCtrl) SetRolePerms(ctx context.Context, name string, perms enterprise.Permissions) error {
	return m.setRolePerms(ctx, name, perms)
}

func (m *mockCtrl) SetRoleUsers(ctx context.Context, name string, users []string) error {
	return m.setRoleUsers(ctx, name, users)
}

func (m *mockCtrl) AddRoleUsers(ctx context.Context, name string, users []string) error {
	return m.addRoleUsers(ctx, name, users)
}

func (m *mockCtrl) RemoveRoleUsers(ctx context.Context, name string, users []string) error {
	return m.removeRoleUsers(ctx, name, users)
}

View File

@ -1,44 +0,0 @@
FROM ubuntu:trusty
RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \
apt-transport-https \
python-dev \
wget \
curl \
git \
mercurial \
make \
ruby \
ruby-dev \
rpm \
zip \
python-pip \
autoconf \
libtool
RUN pip install boto requests python-jose --upgrade
RUN gem install fpm
# Install node
ENV NODE_VERSION v8.10.0
RUN wget -q https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-linux-x64.tar.gz; \
tar -xvf node-${NODE_VERSION}-linux-x64.tar.gz -C / --strip-components=1; \
rm -f node-${NODE_VERSION}-linux-x64.tar.gz
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.10
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
ENV PATH /usr/local/go/bin:$PATH
ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb/chronograf
ENV PATH $GOPATH/bin:$PATH
RUN mkdir -p $PROJECT_DIR
WORKDIR $PROJECT_DIR
VOLUME $PROJECT_DIR
ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/chronograf/etc/build.py" ]

View File

@ -1,15 +0,0 @@
## Builds
Builds are run from a Docker build image that is configured with the Node.js and Go versions we support.
Our circle.yml uses this docker container to build, test and create release packages.
### Updating new node/go versions
After updating the Dockerfile_build run
`docker build -t quay.io/influxdb/builder:chronograf-$(date "+%Y%m%d") -f Dockerfile_build .`
and push to quay with:
`docker push quay.io/influxdb/builder:chronograf-$(date "+%Y%m%d")`
### Update circle
Update DOCKER_TAG in circle.yml to the new container.

File diff suppressed because it is too large Load Diff

View File

@ -1 +0,0 @@
# TODO: wire up configuration files

View File

@ -1,4 +0,0 @@
#!/bin/sh
for a in `gdl -no-vendored -test -repo ./... | awk 'NR>1 {print $5}'`; do echo \[\]\($a/blob/master/\) ; done
nlf -c |awk -F, '{printf "%s %s \[%s\]\(%s\)\n", $1, $2, $5, $4}'

View File

@ -1,21 +0,0 @@
# If you modify this, please also make sure to edit init.sh
[Unit]
Description=Open source monitoring and visualization UI for the entire TICK stack.
Documentation="https://www.influxdata.com/time-series-platform/chronograf/"
After=network-online.target
[Service]
User=chronograf
Group=chronograf
Environment="HOST=0.0.0.0"
Environment="PORT=8888"
Environment="BOLT_PATH=/var/lib/chronograf/chronograf-v1.db"
Environment="CANNED_PATH=/usr/share/chronograf/canned"
EnvironmentFile=-/etc/default/chronograf
ExecStart=/usr/bin/chronograf $CHRONOGRAF_OPTS
KillMode=control-group
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -1,8 +0,0 @@
#!/bin/bash
set -x
docker_tag="chronograf-$(date +%Y%m%d)"
docker build --rm=false -f etc/Dockerfile_build -t builder:$docker_tag .
docker tag builder:$docker_tag quay.io/influxdb/builder:$docker_tag
docker push quay.io/influxdb/builder:$docker_tag

View File

@ -1,11 +0,0 @@
#!/bin/bash
#
# Pull the required build image from quay.io.
#
if [[ -z "$DOCKER_TAG" ]]; then
echo "Please specify a tag to pull from with the DOCKER_TAG env variable."
exit 1
fi
docker pull quay.io/influxdb/builder:$DOCKER_TAG

View File

@ -1,26 +0,0 @@
#!/bin/bash
#
# Pass all CLI arguments to Chronograf builder Docker image (passing
# them to the build scripts)
#
# WARNING: This script passes your SSH and AWS credentials within the
# Docker image, so use with caution.
#
set -e
# Default SSH key to $HOME/.ssh/id_rsa if not set
test -z $SSH_KEY_PATH && SSH_KEY_PATH="$HOME/.ssh/id_rsa"
echo "Using SSH key located at: $SSH_KEY_PATH"
# Default docker tag if not specified
test -z "$DOCKER_TAG" && DOCKER_TAG="chronograf-20161121"
docker run \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-v $SSH_KEY_PATH:/root/.ssh/id_rsa \
-v ~/.ssh/known_hosts:/root/.ssh/known_hosts \
-v $(pwd):/root/go/src/github.com/influxdata/influxdb/chronograf \
quay.io/influxdb/builder:$DOCKER_TAG \
"$@"

View File

@ -1,112 +0,0 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: chronograf
# Required-Start: $local_fs $network $named $time $syslog
# Required-Stop: $local_fs $network $named $time $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start the Chronograf service at boot time
### END INIT INFO
# If you modify this, please make sure to also edit chronograf.service
# Script to execute when starting
SCRIPT="/usr/bin/chronograf"
export HOST="0.0.0.0"
export PORT="8888"
export BOLT_PATH="/var/lib/chronograf/chronograf-v1.db"
export CANNED_PATH="/usr/share/chronograf/canned"
# Options to pass to the script on startup
. /etc/default/chronograf
SCRIPT_OPTS="${CHRONOGRAF_OPTS}"
# User to run the process under
RUNAS=chronograf
# PID file for process
PIDFILE=/var/run/chronograf.pid
# Where to redirect logging to
LOGFILE=/var/log/chronograf/chronograf.log
start() {
if [[ -f $PIDFILE ]]; then
# PIDFILE exists
if kill -0 $(cat $PIDFILE) &>/dev/null; then
# PID up, service running
echo '[OK] Service already running.' >&2
return 0
fi
fi
local CMD="$SCRIPT $SCRIPT_OPTS 1>>\"$LOGFILE\" 2>&1 & echo \$!"
su -s /bin/sh -c "$CMD" $RUNAS > "$PIDFILE"
if [[ -f $PIDFILE ]]; then
# PIDFILE exists
if kill -0 $(cat $PIDFILE) &>/dev/null; then
# PID up, service running
echo '[OK] Service successfully started.' >&2
return 0
fi
fi
echo '[ERROR] Could not start service.' >&2
return 1
}
status() {
if [[ -f $PIDFILE ]]; then
# PIDFILE exists
if ps -p $(cat $PIDFILE) &>/dev/null; then
# PID up, service running
echo '[OK] Service running.' >&2
return 0
fi
fi
echo '[ERROR] Service not running.' >&2
return 1
}
stop() {
if [[ -f $PIDFILE ]]; then
# PIDFILE still exists
if kill -0 $(cat $PIDFILE) &>/dev/null; then
# PID still up
kill -15 $(cat $PIDFILE) &>/dev/null && rm -f "$PIDFILE" &>/dev/null
if [[ "$?" = "0" ]]; then
# Successful stop
echo '[OK] Service stopped.' >&2
return 0
else
# Unsuccessful stop
echo '[ERROR] Could not stop service.' >&2
return 1
fi
fi
fi
echo "[OK] Service already stopped."
return 0
}
case "$1" in
start)
if [[ "$UID" != "0" ]]; then
echo "[ERROR] Permission denied."
exit 1
fi
start
;;
status)
status
;;
stop)
if [[ "$UID" != "0" ]]; then
echo "[ERROR] Permission denied."
exit 1
fi
stop
;;
restart)
stop
start
;;
*)
echo "Usage: $0 {start|status|stop|restart}"
esac

View File

@ -1,9 +0,0 @@
/var/log/chronograf/chronograf.log {
daily
rotate 7
missingok
dateext
copytruncate
compress
notifempty
}

View File

@ -1,83 +0,0 @@
#!/bin/bash
# Package post-install script for chronograf: creates the service user,
# data/log directories, and installs the init-system integration that
# matches the running distribution.

BIN_DIR=/usr/bin
DATA_DIR=/var/lib/chronograf
LOG_DIR=/var/log/chronograf
SCRIPT_DIR=/usr/lib/chronograf/scripts
LOGROTATE_DIR=/etc/logrotate.d

# Install the sysv init script.
function install_init {
    cp -f $SCRIPT_DIR/init.sh /etc/init.d/chronograf
    chmod +x /etc/init.d/chronograf
}

# Install and enable the systemd unit.
function install_systemd {
    # Remove any existing symlinks
    rm -f /etc/systemd/system/chronograf.service
    cp -f $SCRIPT_DIR/chronograf.service /lib/systemd/system/chronograf.service
    systemctl enable chronograf || true
    systemctl daemon-reload || true
}

# Register the sysv script with update-rc.d (Debian-family).
function install_update_rcd {
    update-rc.d chronograf defaults
}

# Register the sysv script with chkconfig (RHEL/Amazon-family).
function install_chkconfig {
    chkconfig --add chronograf
}

# Create the chronograf system user if it does not already exist.
id chronograf &>/dev/null
if [[ $? -ne 0 ]]; then
    useradd --system -U -M chronograf -s /bin/false -d $DATA_DIR
fi

# BUGFIX: this line previously ran `mkdir -p $DATA_DIR` when $LOG_DIR was
# missing, so /var/log/chronograf was never created and the chown below
# would fail on fresh installs.
test -d $LOG_DIR || mkdir -p $LOG_DIR
test -d $DATA_DIR || mkdir -p $DATA_DIR
chown -R -L chronograf:chronograf $LOG_DIR
chown -R -L chronograf:chronograf $DATA_DIR
chmod 755 $LOG_DIR
chmod 755 $DATA_DIR

# Remove legacy symlink, if it exists
if [[ -L /etc/init.d/chronograf ]]; then
    rm -f /etc/init.d/chronograf
fi

# Add defaults file, if it doesn't exist
if [[ ! -f /etc/default/chronograf ]]; then
    touch /etc/default/chronograf
fi

# Distribution-specific logic
if [[ -f /etc/redhat-release ]]; then
    # RHEL-variant logic
    which systemctl &>/dev/null
    if [[ $? -eq 0 ]]; then
        install_systemd
    else
        # Assuming sysv
        install_init
        install_chkconfig
    fi
elif [[ -f /etc/debian_version ]]; then
    # Debian/Ubuntu logic
    which systemctl &>/dev/null
    if [[ $? -eq 0 ]]; then
        install_systemd
        systemctl restart chronograf || echo "WARNING: systemd not running."
    else
        # Assuming sysv
        install_init
        install_update_rcd
        invoke-rc.d chronograf restart
    fi
elif [[ -f /etc/os-release ]]; then
    source /etc/os-release
    if [[ $ID = "amzn" ]]; then
        # Amazon Linux logic
        install_init
        install_chkconfig
    fi
fi

View File

@ -1,56 +0,0 @@
#!/bin/bash
function disable_systemd {
systemctl disable chronograf
rm -f /lib/systemd/system/chronograf.service
}
function disable_update_rcd {
update-rc.d -f chronograf remove
rm -f /etc/init.d/chronograf
}
function disable_chkconfig {
chkconfig --del chronograf
rm -f /etc/init.d/chronograf
}
if [[ -f /etc/redhat-release ]]; then
# RHEL-variant logic
if [[ "$1" = "0" ]]; then
# chronograf is no longer installed, remove from init system
rm -f /etc/default/chronograf
which systemctl &>/dev/null
if [[ $? -eq 0 ]]; then
disable_systemd
else
# Assuming sysv
disable_chkconfig
fi
fi
elif [[ -f /etc/lsb-release ]]; then
# Debian/Ubuntu logic
if [[ "$1" != "upgrade" ]]; then
# Remove/purge
rm -f /etc/default/chronograf
which systemctl &>/dev/null
if [[ $? -eq 0 ]]; then
disable_systemd
else
# Assuming sysv
disable_update_rcd
fi
fi
elif [[ -f /etc/os-release ]]; then
source /etc/os-release
if [[ $ID = "amzn" ]]; then
# Amazon Linux logic
if [[ "$1" = "0" ]]; then
# chronograf is no longer installed, remove from init system
rm -f /etc/default/chronograf
disable_chkconfig
fi
fi
fi

View File

@ -1,205 +0,0 @@
package filestore
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/pkg/fs"
)
// AppExt is the file extension searched for in the directory for layout files
const AppExt = ".json"
// Apps are canned JSON layouts. Implements LayoutsStore.
type Apps struct {
Dir string // Dir is the directory contained the pre-canned applications.
Load func(string) (chronograf.Layout, error) // Load loads string name and return a Layout
Filename func(string, chronograf.Layout) string // Filename takes dir and layout and returns loadable file
Create func(string, chronograf.Layout) error // Create will write layout to file.
ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename.
Remove func(name string) error // Remove file
IDs chronograf.ID // IDs generate unique ids for new application layouts
Logger chronograf.Logger
}
// NewApps constructs a layout store backed by a file system directory,
// wiring the default file-based load/create/remove implementations.
func NewApps(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.LayoutsStore {
	store := &Apps{
		Dir:      dir,
		Load:     loadFile,
		Filename: fileName,
		Create:   createLayout,
		ReadDir:  ioutil.ReadDir,
		Remove:   os.Remove,
		IDs:      ids,
		Logger:   logger,
	}
	return store
}
// fileName builds the on-disk path for a layout: <dir>/<measurement>.json.
func fileName(dir string, layout chronograf.Layout) string {
	return path.Join(dir, layout.Measurement+AppExt)
}
// loadFile reads and decodes a layout JSON file. A read failure is
// reported as ErrLayoutNotFound and a decode failure as ErrLayoutInvalid,
// hiding the underlying I/O detail from callers.
func loadFile(name string) (chronograf.Layout, error) {
	var layout chronograf.Layout
	octets, err := ioutil.ReadFile(name)
	if err != nil {
		return layout, chronograf.ErrLayoutNotFound
	}
	if err := json.Unmarshal(octets, &layout); err != nil {
		return chronograf.Layout{}, chronograf.ErrLayoutInvalid
	}
	return layout, nil
}
// createLayout serializes layout as indented JSON and writes it to file.
// The file is created before marshaling (matching the original ordering),
// so a marshal failure still leaves an empty file behind.
func createLayout(file string, layout chronograf.Layout) error {
	h, err := fs.CreateFile(file)
	if err != nil {
		return err
	}
	defer h.Close()

	octets, merr := json.MarshalIndent(layout, " ", " ")
	if merr != nil {
		return chronograf.ErrLayoutInvalid
	}
	_, werr := h.Write(octets)
	return werr
}
// All returns every layout that can be loaded from the directory.
// Files without the layout extension, or that fail to load, are skipped
// so that one bad file does not hide the rest.
func (a *Apps) All(ctx context.Context) ([]chronograf.Layout, error) {
	files, err := a.ReadDir(a.Dir)
	if err != nil {
		return nil, err
	}

	layouts := []chronograf.Layout{}
	for _, file := range files {
		if path.Ext(file.Name()) != AppExt {
			continue
		}
		layout, lerr := a.Load(path.Join(a.Dir, file.Name()))
		if lerr != nil {
			continue // load everything we can
		}
		layouts = append(layouts, layout)
	}
	return layouts, nil
}
// Add creates a new layout within the directory. A fresh ID is generated
// and assigned to the layout before it is written to disk; the stored
// layout (with its new ID) is returned on success.
func (a *Apps) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) {
	var err error
	layout.ID, err = a.IDs.Generate()
	if err != nil {
		a.Logger.
			WithField("component", "apps").
			Error("Unable to generate ID")
		return chronograf.Layout{}, err
	}
	file := a.Filename(a.Dir, layout)
	if err = a.Create(file, layout); err != nil {
		// Log an invalid layout differently from a plain write failure.
		if err == chronograf.ErrLayoutInvalid {
			a.Logger.
				WithField("component", "apps").
				WithField("name", file).
				Error("Invalid Layout: ", err)
		} else {
			a.Logger.
				WithField("component", "apps").
				WithField("name", file).
				Error("Unable to write layout:", err)
		}
		return chronograf.Layout{}, err
	}
	return layout, nil
}
// Delete removes a layout file from the directory. The file is found by
// scanning the directory for a layout whose ID matches (see idToFile),
// since only the ID is known at this point.
func (a *Apps) Delete(ctx context.Context, layout chronograf.Layout) error {
	_, file, err := a.idToFile(layout.ID)
	if err != nil {
		return err
	}

	if err := a.Remove(file); err != nil {
		a.Logger.
			WithField("component", "apps").
			WithField("name", file).
			Error("Unable to remove layout:", err)
		return err
	}
	return nil
}
// Get returns the layout with the given ID from the layout directory,
// logging the reason before returning a lookup or decode error.
func (a *Apps) Get(ctx context.Context, ID string) (chronograf.Layout, error) {
	l, file, err := a.idToFile(ID)
	if err != nil {
		// BUGFIX: this logging previously sat in a second `if err != nil`
		// block placed after an early `return`, so it was unreachable.
		if err == chronograf.ErrLayoutNotFound {
			a.Logger.
				WithField("component", "apps").
				WithField("name", file).
				Error("Unable to read file")
		} else if err == chronograf.ErrLayoutInvalid {
			a.Logger.
				WithField("component", "apps").
				WithField("name", file).
				Error("File is not a layout")
		}
		return chronograf.Layout{}, err
	}
	return l, nil
}
// Update replaces a layout in the file system directory. The existing
// file (located by ID) is deleted first and a new file written, because
// the file name derives from the layout's Measurement and may change.
// NOTE(review): this delete-then-create sequence is not atomic; a crash
// between the two steps loses the layout.
func (a *Apps) Update(ctx context.Context, layout chronograf.Layout) error {
	l, _, err := a.idToFile(layout.ID)
	if err != nil {
		return err
	}

	if err := a.Delete(ctx, l); err != nil {
		return err
	}
	file := a.Filename(a.Dir, layout)
	return a.Create(file, layout)
}
// idToFile takes an id and finds the associated filename, returning the
// decoded layout alongside it.
func (a *Apps) idToFile(ID string) (chronograf.Layout, string, error) {
	// Because the entire layout information is not known at this point, we need
	// to try to find the name of the file through matching the ID in the layout
	// content with the ID passed. This is a linear scan: every candidate
	// file in the directory is loaded and decoded until the ID matches.
	files, err := a.ReadDir(a.Dir)
	if err != nil {
		return chronograf.Layout{}, "", err
	}

	for _, f := range files {
		// Only files with the canned-layout extension are candidates.
		if path.Ext(f.Name()) != AppExt {
			continue
		}
		file := path.Join(a.Dir, f.Name())
		layout, err := a.Load(file)
		if err != nil {
			return chronograf.Layout{}, "", err
		}
		if layout.ID == ID {
			return layout, file, nil
		}
	}
	return chronograf.Layout{}, "", chronograf.ErrLayoutNotFound
}

View File

@ -1,378 +0,0 @@
package filestore_test
import (
"context"
"errors"
"os"
"path"
"path/filepath"
"reflect"
"sort"
"strconv"
"testing"
"time"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/chronograf/filestore"
)
func TestAll(t *testing.T) {
t.Parallel()
var tests = []struct {
Existing []chronograf.Layout
Err error
}{
{
Existing: []chronograf.Layout{
{ID: "1",
Application: "howdy",
},
{ID: "2",
Application: "doody",
},
},
Err: nil,
},
{
Existing: []chronograf.Layout{},
Err: nil,
},
{
Existing: nil,
Err: errors.New("error"),
},
}
for i, test := range tests {
apps, _ := MockApps(test.Existing, test.Err)
layouts, err := apps.All(context.Background())
if err != test.Err {
t.Errorf("Test %d: apps all error expected: %v; actual: %v", i, test.Err, err)
}
if !reflect.DeepEqual(layouts, test.Existing) {
t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Existing, layouts)
}
}
}
func TestAdd(t *testing.T) {
t.Parallel()
var tests = []struct {
Existing []chronograf.Layout
Add chronograf.Layout
ExpectedID string
Err error
}{
{
Existing: []chronograf.Layout{
{ID: "1",
Application: "howdy",
},
{ID: "2",
Application: "doody",
},
},
Add: chronograf.Layout{
Application: "newbie",
},
ExpectedID: "3",
Err: nil,
},
{
Existing: []chronograf.Layout{},
Add: chronograf.Layout{
Application: "newbie",
},
ExpectedID: "1",
Err: nil,
},
{
Existing: nil,
Add: chronograf.Layout{
Application: "newbie",
},
ExpectedID: "",
Err: errors.New("error"),
},
}
for i, test := range tests {
apps, _ := MockApps(test.Existing, test.Err)
layout, err := apps.Add(context.Background(), test.Add)
if err != test.Err {
t.Errorf("Test %d: apps add error expected: %v; actual: %v", i, test.Err, err)
}
if layout.ID != test.ExpectedID {
t.Errorf("Test %d: Layout ID should be equal; expected %s; actual %s", i, test.ExpectedID, layout.ID)
}
}
}
func TestDelete(t *testing.T) {
t.Parallel()
var tests = []struct {
Existing []chronograf.Layout
DeleteID string
Expected map[string]chronograf.Layout
Err error
}{
{
Existing: []chronograf.Layout{
{ID: "1",
Application: "howdy",
},
{ID: "2",
Application: "doody",
},
},
DeleteID: "1",
Expected: map[string]chronograf.Layout{
"dir/2.json": {ID: "2",
Application: "doody",
},
},
Err: nil,
},
{
Existing: []chronograf.Layout{},
DeleteID: "1",
Expected: map[string]chronograf.Layout{},
Err: chronograf.ErrLayoutNotFound,
},
{
Existing: nil,
DeleteID: "1",
Expected: map[string]chronograf.Layout{},
Err: errors.New("error"),
},
}
for i, test := range tests {
apps, actual := MockApps(test.Existing, test.Err)
err := apps.Delete(context.Background(), chronograf.Layout{ID: test.DeleteID})
if err != test.Err {
t.Errorf("Test %d: apps delete error expected: %v; actual: %v", i, test.Err, err)
}
if !reflect.DeepEqual(*actual, test.Expected) {
t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, actual)
}
}
}
func TestGet(t *testing.T) {
t.Parallel()
var tests = []struct {
Existing []chronograf.Layout
ID string
Expected chronograf.Layout
Err error
}{
{
Existing: []chronograf.Layout{
{ID: "1",
Application: "howdy",
},
{ID: "2",
Application: "doody",
},
},
ID: "1",
Expected: chronograf.Layout{
ID: "1",
Application: "howdy",
},
Err: nil,
},
{
Existing: []chronograf.Layout{},
ID: "1",
Expected: chronograf.Layout{},
Err: chronograf.ErrLayoutNotFound,
},
{
Existing: nil,
ID: "1",
Expected: chronograf.Layout{},
Err: chronograf.ErrLayoutNotFound,
},
}
for i, test := range tests {
apps, _ := MockApps(test.Existing, test.Err)
layout, err := apps.Get(context.Background(), test.ID)
if err != test.Err {
t.Errorf("Test %d: Layouts get error expected: %v; actual: %v", i, test.Err, err)
}
if !reflect.DeepEqual(layout, test.Expected) {
t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, layout)
}
}
}
func TestUpdate(t *testing.T) {
t.Parallel()
var tests = []struct {
Existing []chronograf.Layout
Update chronograf.Layout
Expected map[string]chronograf.Layout
Err error
}{
{
Existing: []chronograf.Layout{
{ID: "1",
Application: "howdy",
},
{ID: "2",
Application: "doody",
},
},
Update: chronograf.Layout{
ID: "1",
Application: "hello",
Measurement: "measurement",
},
Expected: map[string]chronograf.Layout{
"dir/1.json": {ID: "1",
Application: "hello",
Measurement: "measurement",
},
"dir/2.json": {ID: "2",
Application: "doody",
},
},
Err: nil,
},
{
Existing: []chronograf.Layout{},
Update: chronograf.Layout{
ID: "1",
},
Expected: map[string]chronograf.Layout{},
Err: chronograf.ErrLayoutNotFound,
},
{
Existing: nil,
Update: chronograf.Layout{
ID: "1",
},
Expected: map[string]chronograf.Layout{},
Err: chronograf.ErrLayoutNotFound,
},
}
for i, test := range tests {
apps, actual := MockApps(test.Existing, test.Err)
err := apps.Update(context.Background(), test.Update)
if err != test.Err {
t.Errorf("Test %d: Layouts get error expected: %v; actual: %v", i, test.Err, err)
}
if !reflect.DeepEqual(*actual, test.Expected) {
t.Errorf("Test %d: Layouts should be equal; expected %v; actual %v", i, test.Expected, actual)
}
}
}
type MockFileInfo struct {
name string
}
func (m *MockFileInfo) Name() string {
return m.name
}
func (m *MockFileInfo) Size() int64 {
return 0
}
func (m *MockFileInfo) Mode() os.FileMode {
return 0666
}
func (m *MockFileInfo) ModTime() time.Time {
return time.Now()
}
func (m *MockFileInfo) IsDir() bool {
return false
}
func (m *MockFileInfo) Sys() interface{} {
return nil
}
type MockFileInfos []os.FileInfo
func (m MockFileInfos) Len() int { return len(m) }
func (m MockFileInfos) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m MockFileInfos) Less(i, j int) bool { return m[i].Name() < m[j].Name() }
// MockID hands out sequential string IDs for tests.
type MockID struct {
	id int // last ID issued
}

// Generate returns the next ID in the sequence as a decimal string;
// it never fails.
func (m *MockID) Generate() (string, error) {
	next := m.id + 1
	m.id = next
	return strconv.Itoa(next), nil
}
func MockApps(existing []chronograf.Layout, expected error) (filestore.Apps, *map[string]chronograf.Layout) {
layouts := map[string]chronograf.Layout{}
fileName := func(dir string, layout chronograf.Layout) string {
return path.Join(dir, layout.ID+".json")
}
dir := "dir"
for _, l := range existing {
layouts[fileName(dir, l)] = l
}
load := func(file string) (chronograf.Layout, error) {
if expected != nil {
return chronograf.Layout{}, expected
}
l, ok := layouts[file]
if !ok {
return chronograf.Layout{}, chronograf.ErrLayoutNotFound
}
return l, nil
}
create := func(file string, layout chronograf.Layout) error {
if expected != nil {
return expected
}
layouts[file] = layout
return nil
}
readDir := func(dirname string) ([]os.FileInfo, error) {
if expected != nil {
return nil, expected
}
info := []os.FileInfo{}
for k := range layouts {
info = append(info, &MockFileInfo{filepath.Base(k)})
}
sort.Sort(MockFileInfos(info))
return info, nil
}
remove := func(name string) error {
if expected != nil {
return expected
}
if _, ok := layouts[name]; !ok {
return chronograf.ErrLayoutNotFound
}
delete(layouts, name)
return nil
}
return filestore.Apps{
Dir: dir,
Load: load,
Filename: fileName,
Create: create,
ReadDir: readDir,
Remove: remove,
IDs: &MockID{
id: len(existing),
},
Logger: &chronograf.NoopLogger{},
}, &layouts
}

View File

@ -1,211 +0,0 @@
package filestore
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"github.com/influxdata/influxdb/v2/chronograf"
"github.com/influxdata/influxdb/v2/pkg/fs"
)
// DashExt is the file extension searched for in the directory for dashboard files
const DashExt = ".dashboard"
var _ chronograf.DashboardsStore = &Dashboards{}
// Dashboards are JSON dashboards stored in the filesystem
type Dashboards struct {
Dir string // Dir is the directory containing the dashboards.
Load func(string, interface{}) error // Load loads string name and dashboard passed in as interface
Create func(string, interface{}) error // Create will write dashboard to file.
ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename.
Remove func(name string) error // Remove file
IDs chronograf.ID // IDs generate unique ids for new dashboards
Logger chronograf.Logger
}
// NewDashboards constructs a dashboard store wrapping a file system directory
func NewDashboards(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.DashboardsStore {
return &Dashboards{
Dir: dir,
Load: load,
Create: create,
ReadDir: ioutil.ReadDir,
Remove: os.Remove,
IDs: ids,
Logger: logger,
}
}
// dashboardFile builds the on-disk path for a dashboard: <dir>/<name>.dashboard.
func dashboardFile(dir string, dashboard chronograf.Dashboard) string {
	return path.Join(dir, dashboard.Name+DashExt)
}
// load reads the named resource file (with environment-variable
// templating applied by templatedFromEnv) and unmarshals its JSON
// into resource.
func load(name string, resource interface{}) error {
	octets, err := templatedFromEnv(name)
	if err != nil {
		// Wrap the underlying error instead of discarding it, so callers can
		// see why the resource failed to load (missing file vs. bad template).
		return fmt.Errorf("resource %s not found: %w", name, err)
	}
	return json.Unmarshal(octets, resource)
}
func create(file string, resource interface{}) error {
h, err := fs.CreateFile(file)
if err != nil {
return err
}
defer h.Close()
octets, err := json.MarshalIndent(resource, " ", " ")
if err != nil {
return err
}
_, err = h.Write(octets)
return err
}
// All returns all dashboards from the directory
func (d *Dashboards) All(ctx context.Context) ([]chronograf.Dashboard, error) {
files, err := d.ReadDir(d.Dir)
if err != nil {
return nil, err
}
dashboards := []chronograf.Dashboard{}
for _, file := range files {
if path.Ext(file.Name()) != DashExt {
continue
}
var dashboard chronograf.Dashboard
if err := d.Load(path.Join(d.Dir, file.Name()), &dashboard); err != nil {
continue // We want to load all files we can.
} else {
dashboards = append(dashboards, dashboard)
}
}
return dashboards, nil
}
// Add creates a new dashboard within the directory
func (d *Dashboards) Add(ctx context.Context, dashboard chronograf.Dashboard) (chronograf.Dashboard, error) {
genID, err := d.IDs.Generate()
if err != nil {
d.Logger.
WithField("component", "dashboard").
Error("Unable to generate ID")
return chronograf.Dashboard{}, err
}
id, err := strconv.Atoi(genID)
if err != nil {
d.Logger.
WithField("component", "dashboard").
Error("Unable to convert ID")
return chronograf.Dashboard{}, err
}
dashboard.ID = chronograf.DashboardID(id)
file := dashboardFile(d.Dir, dashboard)
if err = d.Create(file, dashboard); err != nil {
if err == chronograf.ErrDashboardInvalid {
d.Logger.
WithField("component", "dashboard").
WithField("name", file).
Error("Invalid Dashboard: ", err)
} else {
d.Logger.
WithField("component", "dashboard").
WithField("name", file).
Error("Unable to write dashboard:", err)
}
return chronograf.Dashboard{}, err
}
return dashboard, nil
}
// Delete removes a dashboard file from the directory
func (d *Dashboards) Delete(ctx context.Context, dashboard chronograf.Dashboard) error {
_, file, err := d.idToFile(dashboard.ID)
if err != nil {
return err
}
if err := d.Remove(file); err != nil {
d.Logger.
WithField("component", "dashboard").
WithField("name", file).
Error("Unable to remove dashboard:", err)
return err
}
return nil
}
// Get returns a dashboard file from the dashboard directory
func (d *Dashboards) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) {
board, file, err := d.idToFile(id)
if err != nil {
if err == chronograf.ErrDashboardNotFound {
d.Logger.
WithField("component", "dashboard").
WithField("name", file).
Error("Unable to read file")
} else if err == chronograf.ErrDashboardInvalid {
d.Logger.
WithField("component", "dashboard").
WithField("name", file).
Error("File is not a dashboard")
}
return chronograf.Dashboard{}, err
}
return board, nil
}
// Update replaces a dashboard from the file system directory
func (d *Dashboards) Update(ctx context.Context, dashboard chronograf.Dashboard) error {
board, _, err := d.idToFile(dashboard.ID)
if err != nil {
return err
}
if err := d.Delete(ctx, board); err != nil {
return err
}
file := dashboardFile(d.Dir, dashboard)
return d.Create(file, dashboard)
}
// idToFile takes an id and finds the associated filename
func (d *Dashboards) idToFile(id chronograf.DashboardID) (chronograf.Dashboard, string, error) {
// Because the entire dashboard information is not known at this point, we need
// to try to find the name of the file through matching the ID in the dashboard
// content with the ID passed.
files, err := d.ReadDir(d.Dir)
if err != nil {
return chronograf.Dashboard{}, "", err
}
for _, f := range files {
if path.Ext(f.Name()) != DashExt {
continue
}
file := path.Join(d.Dir, f.Name())
var dashboard chronograf.Dashboard
if err := d.Load(file, &dashboard); err != nil {
return chronograf.Dashboard{}, "", err
}
if dashboard.ID == id {
return dashboard, file, nil
}
}
return chronograf.Dashboard{}, "", chronograf.ErrDashboardNotFound
}

View File

@ -1,24 +0,0 @@
package filestore
import (
"os"
"strings"
)
// env caches the process environment after the first environ() call.
var env map[string]string

// environ returns a map of all environment variables in the running
// process. The map is built once and memoized, so changes made to the
// environment after the first call are not reflected.
func environ() map[string]string {
	if env != nil {
		return env
	}
	env = make(map[string]string)
	for _, pair := range os.Environ() {
		// Split on the first '='; entries without one are skipped.
		if idx := strings.Index(pair, "="); idx >= 0 {
			env[pair[:idx]] = pair[idx+1:]
		}
	}
	return env
}

View File

@ -1,29 +0,0 @@
package filestore
import (
"os"
"testing"
)
func Test_environ(t *testing.T) {
tests := []struct {
name string
key string
value string
}{
{
name: "environment variable is returned",
key: "CHRONOGRAF_TEST_ENVIRON",
value: "howdy",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
os.Setenv(tt.key, tt.value)
got := environ()
if v, ok := got[tt.key]; !ok || v != tt.value {
t.Errorf("environ() = %v, want %v", v, tt.value)
}
})
}
}

View File

@ -1,186 +0,0 @@
package filestore
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"github.com/influxdata/influxdb/v2/chronograf"
)
// KapExt is the file extension searched for in the directory for kapacitor files
const KapExt = ".kap"
var _ chronograf.ServersStore = &Kapacitors{}
// Kapacitors are JSON kapacitors stored in the filesystem
type Kapacitors struct {
Dir string // Dir is the directory containing the kapacitors.
Load func(string, interface{}) error // Load loads string name and dashboard passed in as interface
Create func(string, interface{}) error // Create will write kapacitor to file.
ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename.
Remove func(name string) error // Remove file
IDs chronograf.ID // IDs generate unique ids for new kapacitors
Logger chronograf.Logger
}
// NewKapacitors constructs a kapacitor store wrapping a file system directory
func NewKapacitors(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.ServersStore {
return &Kapacitors{
Dir: dir,
Load: load,
Create: create,
ReadDir: ioutil.ReadDir,
Remove: os.Remove,
IDs: ids,
Logger: logger,
}
}
// kapacitorFile builds the on-disk path for a kapacitor server: <dir>/<name>.kap.
func kapacitorFile(dir string, kapacitor chronograf.Server) string {
	return path.Join(dir, kapacitor.Name+KapExt)
}
// All returns all kapacitors from the directory
func (d *Kapacitors) All(ctx context.Context) ([]chronograf.Server, error) {
files, err := d.ReadDir(d.Dir)
if err != nil {
return nil, err
}
kapacitors := []chronograf.Server{}
for _, file := range files {
if path.Ext(file.Name()) != KapExt {
continue
}
var kapacitor chronograf.Server
if err := d.Load(path.Join(d.Dir, file.Name()), &kapacitor); err != nil {
var fmtErr = fmt.Errorf("error loading kapacitor configuration from %v:\n%v", path.Join(d.Dir, file.Name()), err)
d.Logger.Error(fmtErr)
continue // We want to load all files we can.
} else {
kapacitors = append(kapacitors, kapacitor)
}
}
return kapacitors, nil
}
// Add creates a new kapacitor within the directory.
//
// A unique ID is generated, converted to an int, and assigned to the
// kapacitor before it is written to disk. On any failure the zero
// chronograf.Server is returned along with the error.
func (d *Kapacitors) Add(ctx context.Context, kapacitor chronograf.Server) (chronograf.Server, error) {
	genID, err := d.IDs.Generate()
	if err != nil {
		d.Logger.
			WithField("component", "kapacitor").
			Error("Unable to generate ID")
		return chronograf.Server{}, err
	}

	// IDs are generated as strings; the filestore uses integer IDs.
	id, err := strconv.Atoi(genID)
	if err != nil {
		d.Logger.
			WithField("component", "kapacitor").
			Error("Unable to convert ID")
		return chronograf.Server{}, err
	}
	kapacitor.ID = id

	file := kapacitorFile(d.Dir, kapacitor)
	if err = d.Create(file, kapacitor); err != nil {
		// Distinguish validation failures from I/O failures in the log.
		if err == chronograf.ErrServerInvalid {
			d.Logger.
				WithField("component", "kapacitor").
				WithField("name", file).
				Error("Invalid Server: ", err)
		} else {
			d.Logger.
				WithField("component", "kapacitor").
				WithField("name", file).
				Error("Unable to write kapacitor:", err)
		}
		return chronograf.Server{}, err
	}
	return kapacitor, nil
}
// Delete removes a kapacitor file from the directory, locating the file by
// the kapacitor's ID.
func (d *Kapacitors) Delete(ctx context.Context, kapacitor chronograf.Server) error {
	_, file, err := d.idToFile(kapacitor.ID)
	if err != nil {
		return err
	}

	if removeErr := d.Remove(file); removeErr != nil {
		d.Logger.
			WithField("component", "kapacitor").
			WithField("name", file).
			Error("Unable to remove kapacitor:", removeErr)
		return removeErr
	}
	return nil
}
// Get returns a kapacitor loaded from the kapacitor directory by its ID.
func (d *Kapacitors) Get(ctx context.Context, id int) (chronograf.Server, error) {
	srv, file, err := d.idToFile(id)
	if err == nil {
		return srv, nil
	}

	// Log a more specific message for the two known failure modes.
	switch err {
	case chronograf.ErrServerNotFound:
		d.Logger.
			WithField("component", "kapacitor").
			WithField("name", file).
			Error("Unable to read file")
	case chronograf.ErrServerInvalid:
		d.Logger.
			WithField("component", "kapacitor").
			WithField("name", file).
			Error("File is not a kapacitor")
	}
	return chronograf.Server{}, err
}
// Update replaces a kapacitor in the file system directory by removing the
// existing file and writing a fresh one.
func (d *Kapacitors) Update(ctx context.Context, kapacitor chronograf.Server) error {
	existing, _, err := d.idToFile(kapacitor.ID)
	if err != nil {
		return err
	}

	// Delete-then-create: the file name may change when the server's name
	// changes, so the old file cannot simply be overwritten in place.
	if err := d.Delete(ctx, existing); err != nil {
		return err
	}
	return d.Create(kapacitorFile(d.Dir, kapacitor), kapacitor)
}
// idToFile takes an id and finds the associated filename.
//
// Callers only know the kapacitor's ID, not its name (which determines the
// file name), so every .kap file in the directory is loaded and its ID
// compared until a match is found. This is a linear scan over the directory.
func (d *Kapacitors) idToFile(id int) (chronograf.Server, string, error) {
	// Because the entire kapacitor information is not known at this point, we need
	// to try to find the name of the file through matching the ID in the kapacitor
	// content with the ID passed.
	files, err := d.ReadDir(d.Dir)
	if err != nil {
		return chronograf.Server{}, "", err
	}
	for _, f := range files {
		if path.Ext(f.Name()) != KapExt {
			continue
		}
		file := path.Join(d.Dir, f.Name())
		var kapacitor chronograf.Server
		if err := d.Load(file, &kapacitor); err != nil {
			return chronograf.Server{}, "", err
		}
		if kapacitor.ID == id {
			return kapacitor, file, nil
		}
	}
	return chronograf.Server{}, "", chronograf.ErrServerNotFound
}

View File

@ -1,117 +0,0 @@
package filestore
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"github.com/influxdata/influxdb/v2/chronograf"
)
// OrgExt is the file extension searched for in the directory for org files.
const OrgExt = ".org"

// Organizations must satisfy chronograf.OrganizationsStore.
var _ chronograf.OrganizationsStore = (*Organizations)(nil)

// Organizations are JSON orgs stored in the filesystem.
//
// The store is effectively read-only: orgs can be listed and fetched, but
// every mutating method returns an error.
type Organizations struct {
	Dir     string                                      // Dir is the directory containing the orgs.
	Load    func(string, interface{}) error             // Load loads string name and org passed in as interface
	ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename.
	Logger  chronograf.Logger                           // Logger is currently unused by this store's methods
}
// NewOrganizations constructs an org store wrapping a file system directory.
func NewOrganizations(dir string, logger chronograf.Logger) chronograf.OrganizationsStore {
	store := Organizations{
		Dir:     dir,
		Load:    load,
		ReadDir: ioutil.ReadDir,
		Logger:  logger,
	}
	return &store
}
// All returns all orgs from the directory.
//
// Unreadable files are silently skipped so that one corrupt .org file does
// not hide the rest.
func (o *Organizations) All(ctx context.Context) ([]chronograf.Organization, error) {
	files, err := o.ReadDir(o.Dir)
	if err != nil {
		return nil, err
	}

	orgs := []chronograf.Organization{}
	for _, file := range files {
		if path.Ext(file.Name()) != OrgExt {
			continue
		}
		var org chronograf.Organization
		if err := o.Load(path.Join(o.Dir, file.Name()), &org); err != nil {
			continue // We want to load all files we can.
		}
		// Redundant else removed: the error branch always continues
		// (Go indent-error-flow idiom).
		orgs = append(orgs, org)
	}
	return orgs, nil
}
// Get returns the org matching the query from the org directory.
func (o *Organizations) Get(ctx context.Context, query chronograf.OrganizationQuery) (*chronograf.Organization, error) {
	org, _, err := o.findOrg(query)
	if err != nil {
		return nil, err
	}
	return org, nil
}
// Add is not allowed for the filesystem organization store; it always
// returns an error.
func (o *Organizations) Add(ctx context.Context, org *chronograf.Organization) (*chronograf.Organization, error) {
	return nil, fmt.Errorf("unable to add organizations to the filesystem")
}

// Delete is not allowed for the filesystem organization store; it always
// returns an error.
func (o *Organizations) Delete(ctx context.Context, org *chronograf.Organization) error {
	return fmt.Errorf("unable to delete an organization from the filesystem")
}

// Update is not allowed for the filesystem organization store; it always
// returns an error.
func (o *Organizations) Update(ctx context.Context, org *chronograf.Organization) error {
	return fmt.Errorf("unable to update organizations on the filesystem")
}

// CreateDefault is not allowed for the filesystem organization store; it
// always returns an error.
func (o *Organizations) CreateDefault(ctx context.Context) error {
	return fmt.Errorf("unable to create default organizations on the filesystem")
}

// DefaultOrganization is not allowed for the filesystem organization store;
// it always returns an error.
func (o *Organizations) DefaultOrganization(ctx context.Context) (*chronograf.Organization, error) {
	return nil, fmt.Errorf("unable to get default organizations from the filestore")
}
// findOrg takes an OrganizationQuery and finds the associated filename.
//
// Matching is by ID first, then by name; either query field may be nil, in
// which case it is ignored. Every .org file in the directory is loaded and
// compared until a match is found.
func (o *Organizations) findOrg(query chronograf.OrganizationQuery) (*chronograf.Organization, string, error) {
	// Because the entire org information is not known at this point, we need
	// to try to find the name of the file through matching the ID or name in the org
	// content with the ID passed.
	files, err := o.ReadDir(o.Dir)
	if err != nil {
		return nil, "", err
	}
	for _, f := range files {
		if path.Ext(f.Name()) != OrgExt {
			continue
		}
		file := path.Join(o.Dir, f.Name())
		var org chronograf.Organization
		if err := o.Load(file, &org); err != nil {
			return nil, "", err
		}
		if query.ID != nil && org.ID == *query.ID {
			return &org, file, nil
		}
		if query.Name != nil && org.Name == *query.Name {
			return &org, file, nil
		}
	}
	return nil, "", chronograf.ErrOrganizationNotFound
}

View File

@ -1,186 +0,0 @@
package filestore
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"github.com/influxdata/influxdb/v2/chronograf"
)
// SrcExt is the file extension searched for in the directory for source files.
const SrcExt = ".src"

// Sources must satisfy chronograf.SourcesStore.
var _ chronograf.SourcesStore = &Sources{}

// Sources are JSON sources stored in the filesystem.
type Sources struct {
	Dir     string                                      // Dir is the directory containing the sources.
	Load    func(string, interface{}) error             // Load loads string name and source passed in as interface
	Create  func(string, interface{}) error             // Create will write source to file.
	ReadDir func(dirname string) ([]os.FileInfo, error) // ReadDir reads the directory named by dirname and returns a list of directory entries sorted by filename.
	Remove  func(name string) error                     // Remove file
	IDs     chronograf.ID                               // IDs generate unique ids for new sources
	Logger  chronograf.Logger                           // Logger records load/write failures
}
// NewSources constructs a source store wrapping a file system directory.
func NewSources(dir string, ids chronograf.ID, logger chronograf.Logger) chronograf.SourcesStore {
	return &Sources{
		Dir:     dir,
		Load:    load,
		Create:  create,
		ReadDir: ioutil.ReadDir,
		Remove:  os.Remove,
		IDs:     ids,
		Logger:  logger,
	}
}

// sourceFile returns the path of the file holding the given source, derived
// from the source name plus the .src extension.
func sourceFile(dir string, source chronograf.Source) string {
	// Plain concatenation is the idiomatic form for joining two strings;
	// fmt.Sprintf("%s%s", ...) adds overhead with no gain in clarity.
	return path.Join(dir, source.Name+SrcExt)
}
// All returns all sources from the directory.
//
// Files that fail to load are logged and skipped so a single corrupt .src
// file does not hide the remaining sources.
func (d *Sources) All(ctx context.Context) ([]chronograf.Source, error) {
	files, err := d.ReadDir(d.Dir)
	if err != nil {
		return nil, err
	}

	sources := []chronograf.Source{}
	for _, file := range files {
		if path.Ext(file.Name()) != SrcExt {
			continue
		}
		var source chronograf.Source
		if err := d.Load(path.Join(d.Dir, file.Name()), &source); err != nil {
			fmtErr := fmt.Errorf("error loading source configuration from %v:\n%v", path.Join(d.Dir, file.Name()), err)
			d.Logger.Error(fmtErr)
			continue // We want to load all files we can.
		}
		// Redundant else removed: the error branch always continues
		// (Go indent-error-flow idiom).
		sources = append(sources, source)
	}
	return sources, nil
}
// Add creates a new source within the directory.
//
// A unique ID is generated, converted to an int, and assigned to the source
// before it is written to disk. On any failure the zero chronograf.Source
// is returned along with the error.
func (d *Sources) Add(ctx context.Context, source chronograf.Source) (chronograf.Source, error) {
	genID, err := d.IDs.Generate()
	if err != nil {
		d.Logger.
			WithField("component", "source").
			Error("Unable to generate ID")
		return chronograf.Source{}, err
	}

	// IDs are generated as strings; the filestore uses integer IDs.
	id, err := strconv.Atoi(genID)
	if err != nil {
		d.Logger.
			WithField("component", "source").
			Error("Unable to convert ID")
		return chronograf.Source{}, err
	}
	source.ID = id

	file := sourceFile(d.Dir, source)
	if err = d.Create(file, source); err != nil {
		// Distinguish validation failures from I/O failures in the log.
		if err == chronograf.ErrSourceInvalid {
			d.Logger.
				WithField("component", "source").
				WithField("name", file).
				Error("Invalid Source: ", err)
		} else {
			d.Logger.
				WithField("component", "source").
				WithField("name", file).
				Error("Unable to write source:", err)
		}
		return chronograf.Source{}, err
	}
	return source, nil
}
// Delete removes a source file from the directory, locating the file by the
// source's ID.
func (d *Sources) Delete(ctx context.Context, source chronograf.Source) error {
	_, file, err := d.idToFile(source.ID)
	if err != nil {
		return err
	}

	if removeErr := d.Remove(file); removeErr != nil {
		d.Logger.
			WithField("component", "source").
			WithField("name", file).
			Error("Unable to remove source:", removeErr)
		return removeErr
	}
	return nil
}
// Get returns a source loaded from the source directory by its ID.
func (d *Sources) Get(ctx context.Context, id int) (chronograf.Source, error) {
	src, file, err := d.idToFile(id)
	if err == nil {
		return src, nil
	}

	// Log a more specific message for the two known failure modes.
	switch err {
	case chronograf.ErrSourceNotFound:
		d.Logger.
			WithField("component", "source").
			WithField("name", file).
			Error("Unable to read file")
	case chronograf.ErrSourceInvalid:
		d.Logger.
			WithField("component", "source").
			WithField("name", file).
			Error("File is not a source")
	}
	return chronograf.Source{}, err
}
// Update replaces a source in the file system directory by removing the
// existing file and writing a fresh one.
func (d *Sources) Update(ctx context.Context, source chronograf.Source) error {
	existing, _, err := d.idToFile(source.ID)
	if err != nil {
		return err
	}

	// Delete-then-create: the file name may change when the source's name
	// changes, so the old file cannot simply be overwritten in place.
	if err := d.Delete(ctx, existing); err != nil {
		return err
	}
	return d.Create(sourceFile(d.Dir, source), source)
}
// idToFile takes an id and finds the associated filename.
//
// Callers only know the source's ID, not its name (which determines the
// file name), so every .src file in the directory is loaded and its ID
// compared until a match is found. This is a linear scan over the directory.
func (d *Sources) idToFile(id int) (chronograf.Source, string, error) {
	// Because the entire source information is not known at this point, we need
	// to try to find the name of the file through matching the ID in the source
	// content with the ID passed.
	files, err := d.ReadDir(d.Dir)
	if err != nil {
		return chronograf.Source{}, "", err
	}
	for _, f := range files {
		if path.Ext(f.Name()) != SrcExt {
			continue
		}
		file := path.Join(d.Dir, f.Name())
		var source chronograf.Source
		if err := d.Load(file, &source); err != nil {
			return chronograf.Source{}, "", err
		}
		if source.ID == id {
			return source, file, nil
		}
	}
	return chronograf.Source{}, "", chronograf.ErrSourceNotFound
}

View File

@ -1,28 +0,0 @@
package filestore
import (
"bytes"
"html/template"
)
// templated parses all of the given files as one template set and executes
// it against data, returning the rendered bytes.
//
// Missing keys are a hard error: if the template references a field that
// data does not provide, execution fails instead of emitting a zero value.
func templated(data interface{}, filenames ...string) ([]byte, error) {
	tmpl, err := template.ParseFiles(filenames...)
	if err != nil {
		return nil, err
	}

	var rendered bytes.Buffer
	if err := tmpl.Option("missingkey=error").Execute(&rendered, data); err != nil {
		return nil, err
	}
	return rendered.Bytes(), nil
}
// templatedFromEnv returns all files templated against environment variables.
//
// NOTE(review): environ() is defined elsewhere in this package; presumably
// it exposes the process environment as a template-friendly map — confirm
// against its definition.
func templatedFromEnv(filenames ...string) ([]byte, error) {
	return templated(environ(), filenames...)
}

View File

@ -1,64 +0,0 @@
package filestore
import (
"io/ioutil"
"os"
"reflect"
"testing"
)
// Test_templated exercises templated with a table covering successful
// rendering, a missing template key, and the zero-file error case.
func Test_templated(t *testing.T) {
	tests := []struct {
		name    string
		content []string // one temp file is created per entry
		data    interface{}
		want    []byte
		wantErr bool
	}{
		{
			name: "files with templates are rendered correctly",
			content: []string{
				"{{ .MYVAR }}",
			},
			data: map[string]string{
				"MYVAR": "howdy",
			},
			want: []byte("howdy"),
		},
		{
			name: "missing key gives an error",
			content: []string{
				"{{ .MYVAR }}",
			},
			wantErr: true,
		},
		{
			name:    "no files make me an error!",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Write each content entry to its own temp file and collect the names.
			filenames := make([]string, len(tt.content))
			for i, c := range tt.content {
				f, err := ioutil.TempFile("", "")
				if err != nil {
					t.Fatal(err)
				}
				if _, err := f.Write([]byte(c)); err != nil {
					t.Fatal(err)
				}
				filenames[i] = f.Name()
				defer os.Remove(f.Name())
			}
			got, err := templated(tt.data, filenames...)
			if (err != nil) != tt.wantErr {
				t.Errorf("templated() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("templated() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -1,56 +0,0 @@
package memdb
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Ensure KapacitorStore implements chronograf.ServersStore.
var _ chronograf.ServersStore = &KapacitorStore{}

// KapacitorStore implements the chronograf.ServersStore interface, and keeps
// an in-memory Kapacitor according to startup configuration.
type KapacitorStore struct {
	Kapacitor *chronograf.Server
}

// All will return a slice containing the configured Kapacitor, or nil when
// none is configured.
func (s *KapacitorStore) All(ctx context.Context) ([]chronograf.Server, error) {
	if s.Kapacitor == nil {
		return nil, nil
	}
	return []chronograf.Server{*s.Kapacitor}, nil
}

// Add does not have any effect; the in-memory store is fixed at startup.
func (s *KapacitorStore) Add(ctx context.Context, kap chronograf.Server) (chronograf.Server, error) {
	return chronograf.Server{}, fmt.Errorf("in-memory KapacitorStore does not support adding a Kapacitor")
}

// Delete removes the in-memory configured Kapacitor if its ID matches what's provided.
func (s *KapacitorStore) Delete(ctx context.Context, kap chronograf.Server) error {
	if s.Kapacitor == nil || s.Kapacitor.ID != kap.ID {
		return fmt.Errorf("unable to find Kapacitor with id %d", kap.ID)
	}
	s.Kapacitor = nil
	return nil
}

// Get returns the in-memory Kapacitor if its ID matches what's provided.
func (s *KapacitorStore) Get(ctx context.Context, id int) (chronograf.Server, error) {
	if s.Kapacitor == nil || s.Kapacitor.ID != id {
		return chronograf.Server{}, fmt.Errorf("unable to find Kapacitor with id %d", id)
	}
	return *s.Kapacitor, nil
}

// Update overwrites the in-memory configured Kapacitor if its ID matches what's provided.
func (s *KapacitorStore) Update(ctx context.Context, kap chronograf.Server) error {
	if s.Kapacitor == nil || s.Kapacitor.ID != kap.ID {
		return fmt.Errorf("unable to find Kapacitor with id %d", kap.ID)
	}
	s.Kapacitor = &kap
	return nil
}

View File

@ -1,128 +0,0 @@
package memdb
import (
"context"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestInterfaceImplementation statically asserts that KapacitorStore
// satisfies chronograf.ServersStore.
func TestInterfaceImplementation(t *testing.T) {
	var _ chronograf.ServersStore = &KapacitorStore{}
}

// TestKapacitorStoreAll covers All on an empty and on a populated store.
func TestKapacitorStoreAll(t *testing.T) {
	ctx := context.Background()

	store := KapacitorStore{}
	kaps, err := store.All(ctx)
	if err != nil {
		t.Fatal("All should not throw an error with an empty Store")
	}
	if len(kaps) != 0 {
		t.Fatal("Store should be empty")
	}

	// A populated store returns exactly the one configured Kapacitor.
	store.Kapacitor = &chronograf.Server{}
	kaps, err = store.All(ctx)
	if err != nil {
		// NOTE(review): message says "empty Store" but the store is
		// populated here; the copy is misleading.
		t.Fatal("All should not throw an error with an empty Store")
	}
	if len(kaps) != 1 {
		t.Fatal("Store should have 1 element")
	}
}

// TestKapacitorStoreAdd confirms Add is rejected by the in-memory store.
func TestKapacitorStoreAdd(t *testing.T) {
	ctx := context.Background()

	store := KapacitorStore{}
	_, err := store.Add(ctx, chronograf.Server{})
	if err == nil {
		t.Fatal("Store should not support adding another source")
	}
}

// TestKapacitorStoreDelete covers Delete on an empty store, with a
// mismatched ID, and with a matching ID.
func TestKapacitorStoreDelete(t *testing.T) {
	ctx := context.Background()

	store := KapacitorStore{}
	err := store.Delete(ctx, chronograf.Server{})
	if err == nil {
		t.Fatal("Delete should not operate on an empty Store")
	}

	store.Kapacitor = &chronograf.Server{
		ID: 9,
	}
	err = store.Delete(ctx, chronograf.Server{
		ID: 8,
	})
	if err == nil {
		t.Fatal("Delete should not remove elements with the wrong ID")
	}

	err = store.Delete(ctx, chronograf.Server{
		ID: 9,
	})
	if err != nil {
		t.Fatal("Delete should remove an element with a matching ID")
	}
}

// TestKapacitorStoreGet covers Get on an empty store, with a mismatched ID,
// and with a matching ID.
func TestKapacitorStoreGet(t *testing.T) {
	ctx := context.Background()

	store := KapacitorStore{}
	_, err := store.Get(ctx, 9)
	if err == nil {
		t.Fatal("Get should return an error for an empty Store")
	}

	store.Kapacitor = &chronograf.Server{
		ID: 9,
	}
	_, err = store.Get(ctx, 8)
	if err == nil {
		t.Fatal("Get should return an error if it finds no matches")
	}

	store.Kapacitor = &chronograf.Server{
		ID: 9,
	}
	kap, err := store.Get(ctx, 9)
	if err != nil || kap.ID != 9 {
		t.Fatal("Get should find the element with a matching ID")
	}
}
// TestKapacitorStoreUpdate covers Update on an empty store, with a
// mismatched ID, and with a matching ID.
func TestKapacitorStoreUpdate(t *testing.T) {
	ctx := context.Background()

	store := KapacitorStore{}
	err := store.Update(ctx, chronograf.Server{})
	if err == nil {
		// Fixed typo in the failure message ("fhouls" -> "should").
		t.Fatal("Update should return an error for an empty Store")
	}

	store.Kapacitor = &chronograf.Server{
		ID: 9,
	}
	err = store.Update(ctx, chronograf.Server{
		ID: 8,
	})
	if err == nil {
		t.Fatal("Update should return an error if it finds no matches")
	}

	store.Kapacitor = &chronograf.Server{
		ID: 9,
	}
	err = store.Update(ctx, chronograf.Server{
		ID:  9,
		URL: "http://crystal.pepsi.com",
	})
	if err != nil || store.Kapacitor.URL != "http://crystal.pepsi.com" {
		t.Fatal("Update should overwrite elements with matching IDs")
	}
}

View File

@ -1,55 +0,0 @@
package memdb
import (
"context"
"fmt"
"github.com/influxdata/influxdb/v2/chronograf"
)
// Ensure SourcesStore implements chronograf.SourcesStore.
var _ chronograf.SourcesStore = &SourcesStore{}

// SourcesStore implements the chronograf.SourcesStore interface with a
// single in-memory Source.
type SourcesStore struct {
	Source *chronograf.Source
}

// Add does not have any effect; the in-memory store is fixed at startup.
func (s *SourcesStore) Add(ctx context.Context, src chronograf.Source) (chronograf.Source, error) {
	return chronograf.Source{}, fmt.Errorf("in-memory SourcesStore does not support adding a Source")
}

// All will return a slice containing the configured source, or nil when
// none is configured.
func (s *SourcesStore) All(ctx context.Context) ([]chronograf.Source, error) {
	if s.Source == nil {
		return nil, nil
	}
	return []chronograf.Source{*s.Source}, nil
}

// Delete removes the SourcesStore.Source if it matches the provided Source.
func (s *SourcesStore) Delete(ctx context.Context, src chronograf.Source) error {
	if s.Source == nil || s.Source.ID != src.ID {
		return fmt.Errorf("unable to find Source with id %d", src.ID)
	}
	s.Source = nil
	return nil
}

// Get returns the configured source if the id matches.
func (s *SourcesStore) Get(ctx context.Context, id int) (chronograf.Source, error) {
	if s.Source == nil || s.Source.ID != id {
		return chronograf.Source{}, fmt.Errorf("unable to find Source with id %d", id)
	}
	return *s.Source, nil
}

// Update replaces the configured source if the id matches. (The previous
// comment claimed Update "does nothing", which contradicted the code.)
func (s *SourcesStore) Update(ctx context.Context, src chronograf.Source) error {
	if s.Source == nil || s.Source.ID != src.ID {
		return fmt.Errorf("unable to find Source with id %d", src.ID)
	}
	s.Source = &src
	return nil
}

View File

@ -1,128 +0,0 @@
package memdb
import (
"context"
"testing"
"github.com/influxdata/influxdb/v2/chronograf"
)
// TestSourcesStore statically asserts that SourcesStore satisfies
// chronograf.SourcesStore.
func TestSourcesStore(t *testing.T) {
	var _ chronograf.SourcesStore = &SourcesStore{}
}

// TestSourcesStoreAdd confirms Add is rejected by the in-memory store.
func TestSourcesStoreAdd(t *testing.T) {
	ctx := context.Background()

	store := SourcesStore{}
	_, err := store.Add(ctx, chronograf.Source{})
	if err == nil {
		t.Fatal("Store should not support adding another source")
	}
}

// TestSourcesStoreAll covers All on an empty and on a populated store.
func TestSourcesStoreAll(t *testing.T) {
	ctx := context.Background()

	store := SourcesStore{}
	srcs, err := store.All(ctx)
	if err != nil {
		t.Fatal("All should not throw an error with an empty Store")
	}
	if len(srcs) != 0 {
		t.Fatal("Store should be empty")
	}

	// A populated store returns exactly the one configured Source.
	store.Source = &chronograf.Source{}
	srcs, err = store.All(ctx)
	if err != nil {
		// NOTE(review): message says "empty Store" but the store is
		// populated here; the copy is misleading.
		t.Fatal("All should not throw an error with an empty Store")
	}
	if len(srcs) != 1 {
		t.Fatal("Store should have 1 element")
	}
}

// TestSourcesStoreDelete covers Delete on an empty store, with a mismatched
// ID, and with a matching ID.
func TestSourcesStoreDelete(t *testing.T) {
	ctx := context.Background()

	store := SourcesStore{}
	err := store.Delete(ctx, chronograf.Source{})
	if err == nil {
		t.Fatal("Delete should not operate on an empty Store")
	}

	store.Source = &chronograf.Source{
		ID: 9,
	}
	err = store.Delete(ctx, chronograf.Source{
		ID: 8,
	})
	if err == nil {
		t.Fatal("Delete should not remove elements with the wrong ID")
	}

	err = store.Delete(ctx, chronograf.Source{
		ID: 9,
	})
	if err != nil {
		t.Fatal("Delete should remove an element with a matching ID")
	}
}

// TestSourcesStoreGet covers Get on an empty store, with a mismatched ID,
// and with a matching ID.
func TestSourcesStoreGet(t *testing.T) {
	ctx := context.Background()

	store := SourcesStore{}
	_, err := store.Get(ctx, 9)
	if err == nil {
		t.Fatal("Get should return an error for an empty Store")
	}

	store.Source = &chronograf.Source{
		ID: 9,
	}
	_, err = store.Get(ctx, 8)
	if err == nil {
		t.Fatal("Get should return an error if it finds no matches")
	}

	store.Source = &chronograf.Source{
		ID: 9,
	}
	src, err := store.Get(ctx, 9)
	if err != nil || src.ID != 9 {
		t.Fatal("Get should find the element with a matching ID")
	}
}

// TestSourcesStoreUpdate covers Update on an empty store, with a mismatched
// ID, and with a matching ID.
func TestSourcesStoreUpdate(t *testing.T) {
	ctx := context.Background()

	store := SourcesStore{}
	err := store.Update(ctx, chronograf.Source{})
	if err == nil {
		t.Fatal("Update should return an error for an empty Store")
	}

	store.Source = &chronograf.Source{
		ID: 9,
	}
	err = store.Update(ctx, chronograf.Source{
		ID: 8,
	})
	if err == nil {
		t.Fatal("Update should return an error if it finds no matches")
	}

	store.Source = &chronograf.Source{
		ID: 9,
	}
	err = store.Update(ctx, chronograf.Source{
		ID: 9,
		URL: "http://crystal.pepsi.com",
	})
	if err != nil || store.Source.URL != "http://crystal.pepsi.com" {
		t.Fatal("Update should overwrite elements with matching IDs")
	}
}

View File

@ -1,51 +0,0 @@
package mocks
import (
"context"
"net/http"
"github.com/influxdata/influxdb/v2/chronograf/oauth2"
)
// Authenticator implements a OAuth2 authenticator for tests; each method
// returns the preconfigured fields below.
type Authenticator struct {
	Principal   oauth2.Principal // returned by Validate and Extend
	ValidateErr error            // error returned by Validate
	ExtendErr   error            // error returned by Extend
	Serialized  string           // returned by Serialize
}

// Validate returns Principal associated with authenticated and authorized
// entity if successful.
func (a *Authenticator) Validate(context.Context, *http.Request) (oauth2.Principal, error) {
	return a.Principal, a.ValidateErr
}

// Extend will extend the lifetime of an already validated Principal; the
// mock also writes an empty cookie to the response.
func (a *Authenticator) Extend(ctx context.Context, w http.ResponseWriter, p oauth2.Principal) (oauth2.Principal, error) {
	cookie := http.Cookie{}
	http.SetCookie(w, &cookie)
	return a.Principal, a.ExtendErr
}

// Authorize will grant privileges to a Principal; the mock only writes an
// empty cookie to the response and always succeeds.
func (a *Authenticator) Authorize(ctx context.Context, w http.ResponseWriter, p oauth2.Principal) error {
	cookie := http.Cookie{}
	http.SetCookie(w, &cookie)
	return nil
}

// Expire revokes privileges from a Principal; no-op in the mock.
func (a *Authenticator) Expire(http.ResponseWriter) {}

// ValidAuthorization always returns the zero Principal and no error.
func (a *Authenticator) ValidAuthorization(ctx context.Context, serializedAuthorization string) (oauth2.Principal, error) {
	return oauth2.Principal{}, nil
}

// Serialize returns the Serialized value stored on the Authenticator.
func (a *Authenticator) Serialize(context.Context, oauth2.Principal) (string, error) {
	return a.Serialized, nil
}

View File

@ -1,28 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
// ConfigStore stores global application configuration.
type ConfigStore struct {
	Config *chronograf.Config // the configuration returned by Get
}

// Initialize is noop in mocks store.
func (c ConfigStore) Initialize(ctx context.Context) error {
	return nil
}

// Get returns the whole global application configuration.
func (c ConfigStore) Get(ctx context.Context) (*chronograf.Config, error) {
	return c.Config, nil
}

// Update updates the whole global application configuration.
//
// NOTE(review): the value receiver means the assignment below mutates a
// copy of the store; the caller's ConfigStore.Config field is left
// unchanged. Confirm whether a pointer receiver was intended before
// relying on this mock for update tests.
func (c ConfigStore) Update(ctx context.Context, config *chronograf.Config) error {
	c.Config = config
	return nil
}

View File

@ -1,37 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
var _ chronograf.DashboardsStore = &DashboardsStore{}

// DashboardsStore is a mock chronograf.DashboardsStore; each method
// delegates to the corresponding func-valued field, which tests set up.
type DashboardsStore struct {
	AddF    func(ctx context.Context, newDashboard chronograf.Dashboard) (chronograf.Dashboard, error)
	AllF    func(ctx context.Context) ([]chronograf.Dashboard, error)
	DeleteF func(ctx context.Context, target chronograf.Dashboard) error
	GetF    func(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error)
	UpdateF func(ctx context.Context, target chronograf.Dashboard) error
}

// Add delegates to AddF.
func (d *DashboardsStore) Add(ctx context.Context, newDashboard chronograf.Dashboard) (chronograf.Dashboard, error) {
	return d.AddF(ctx, newDashboard)
}

// All delegates to AllF.
func (d *DashboardsStore) All(ctx context.Context) ([]chronograf.Dashboard, error) {
	return d.AllF(ctx)
}

// Delete delegates to DeleteF.
func (d *DashboardsStore) Delete(ctx context.Context, target chronograf.Dashboard) error {
	return d.DeleteF(ctx, target)
}

// Get delegates to GetF.
func (d *DashboardsStore) Get(ctx context.Context, id chronograf.DashboardID) (chronograf.Dashboard, error) {
	return d.GetF(ctx, id)
}

// Update delegates to UpdateF.
func (d *DashboardsStore) Update(ctx context.Context, target chronograf.Dashboard) error {
	return d.UpdateF(ctx, target)
}

View File

@ -1,69 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
var _ chronograf.Databases = &Databases{}

// Databases mock allows all databases methods to be set for testing; each
// method delegates to the corresponding func-valued field.
type Databases struct {
	AllDBF           func(context.Context) ([]chronograf.Database, error)
	ConnectF         func(context.Context, *chronograf.Source) error
	CreateDBF        func(context.Context, *chronograf.Database) (*chronograf.Database, error)
	DropDBF          func(context.Context, string) error
	AllRPF           func(context.Context, string) ([]chronograf.RetentionPolicy, error)
	CreateRPF        func(context.Context, string, *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error)
	UpdateRPF        func(context.Context, string, string, *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error)
	DropRPF          func(context.Context, string, string) error
	GetMeasurementsF func(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error)
}

// AllDB lists all databases in the current data source.
func (d *Databases) AllDB(ctx context.Context) ([]chronograf.Database, error) {
	return d.AllDBF(ctx)
}

// Connect connects to a database in the current data source.
func (d *Databases) Connect(ctx context.Context, src *chronograf.Source) error {
	return d.ConnectF(ctx, src)
}

// CreateDB creates a database in the current data source.
func (d *Databases) CreateDB(ctx context.Context, db *chronograf.Database) (*chronograf.Database, error) {
	return d.CreateDBF(ctx, db)
}

// DropDB drops a database in the current data source.
func (d *Databases) DropDB(ctx context.Context, db string) error {
	return d.DropDBF(ctx, db)
}

// AllRP lists all retention policies in the current data source.
func (d *Databases) AllRP(ctx context.Context, rpX string) ([]chronograf.RetentionPolicy, error) {
	return d.AllRPF(ctx, rpX)
}

// CreateRP creates a retention policy in the current data source.
func (d *Databases) CreateRP(ctx context.Context, rpX string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	return d.CreateRPF(ctx, rpX, rp)
}

// UpdateRP updates a retention policy in the current data source.
func (d *Databases) UpdateRP(ctx context.Context, rpX string, rpY string, rp *chronograf.RetentionPolicy) (*chronograf.RetentionPolicy, error) {
	return d.UpdateRPF(ctx, rpX, rpY, rp)
}

// DropRP drops a retention policy in the current data source.
func (d *Databases) DropRP(ctx context.Context, rpX string, rpY string) error {
	return d.DropRPF(ctx, rpX, rpY)
}

// GetMeasurements lists measurements in the current data source.
func (d *Databases) GetMeasurements(ctx context.Context, db string, limit, offset int) ([]chronograf.Measurement, error) {
	return d.GetMeasurementsF(ctx, db, limit, offset)
}

View File

@ -1,34 +0,0 @@
package mocks
// TODO(desa): resolve kapacitor dependency
//var _ kapacitor.KapaClient = &KapaClient{}
//
//// Client is a mock Kapacitor client
//type KapaClient struct {
// CreateTaskF func(opts client.CreateTaskOptions) (client.Task, error)
// DeleteTaskF func(link client.Link) error
// ListTasksF func(opts *client.ListTasksOptions) ([]client.Task, error)
// TaskF func(link client.Link, opts *client.TaskOptions) (client.Task, error)
// UpdateTaskF func(link client.Link, opts client.UpdateTaskOptions) (client.Task, error)
//}
//
//func (p *KapaClient) CreateTask(opts client.CreateTaskOptions) (client.Task, error) {
// return p.CreateTaskF(opts)
//}
//
//func (p *KapaClient) DeleteTask(link client.Link) error {
// return p.DeleteTaskF(link)
//}
//
//func (p *KapaClient) ListTasks(opts *client.ListTasksOptions) ([]client.Task, error) {
// return p.ListTasksF(opts)
//}
//
//func (p *KapaClient) Task(link client.Link, opts *client.TaskOptions) (client.Task, error) {
// return p.TaskF(link, opts)
//}
//
//func (p *KapaClient) UpdateTask(link client.Link, opts client.UpdateTaskOptions) (client.Task, error) {
// return p.UpdateTaskF(link, opts)
//}

View File

@ -1,37 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
var _ chronograf.LayoutsStore = &LayoutsStore{}

// LayoutsStore is a mock chronograf.LayoutsStore; each method delegates to
// the corresponding func-valued field, which tests set up.
type LayoutsStore struct {
	AddF    func(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error)
	AllF    func(ctx context.Context) ([]chronograf.Layout, error)
	DeleteF func(ctx context.Context, layout chronograf.Layout) error
	GetF    func(ctx context.Context, id string) (chronograf.Layout, error)
	UpdateF func(ctx context.Context, layout chronograf.Layout) error
}

// Add delegates to AddF.
func (s *LayoutsStore) Add(ctx context.Context, layout chronograf.Layout) (chronograf.Layout, error) {
	return s.AddF(ctx, layout)
}

// All delegates to AllF.
func (s *LayoutsStore) All(ctx context.Context) ([]chronograf.Layout, error) {
	return s.AllF(ctx)
}

// Delete delegates to DeleteF.
func (s *LayoutsStore) Delete(ctx context.Context, layout chronograf.Layout) error {
	return s.DeleteF(ctx, layout)
}

// Get delegates to GetF.
func (s *LayoutsStore) Get(ctx context.Context, id string) (chronograf.Layout, error) {
	return s.GetF(ctx, id)
}

// Update delegates to UpdateF.
func (s *LayoutsStore) Update(ctx context.Context, layout chronograf.Layout) error {
	return s.UpdateF(ctx, layout)
}

View File

@ -1,35 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
// MappingsStore is a mock mappings store; each method delegates to the
// corresponding func-valued field, which tests set up.
type MappingsStore struct {
	AddF    func(context.Context, *chronograf.Mapping) (*chronograf.Mapping, error)
	AllF    func(context.Context) ([]chronograf.Mapping, error)
	DeleteF func(context.Context, *chronograf.Mapping) error
	UpdateF func(context.Context, *chronograf.Mapping) error
	GetF    func(context.Context, string) (*chronograf.Mapping, error)
}

// Add delegates to AddF.
func (s *MappingsStore) Add(ctx context.Context, m *chronograf.Mapping) (*chronograf.Mapping, error) {
	return s.AddF(ctx, m)
}

// All delegates to AllF.
func (s *MappingsStore) All(ctx context.Context) ([]chronograf.Mapping, error) {
	return s.AllF(ctx)
}

// Delete delegates to DeleteF.
func (s *MappingsStore) Delete(ctx context.Context, m *chronograf.Mapping) error {
	return s.DeleteF(ctx, m)
}

// Get delegates to GetF.
func (s *MappingsStore) Get(ctx context.Context, id string) (*chronograf.Mapping, error) {
	return s.GetF(ctx, id)
}

// Update delegates to UpdateF.
func (s *MappingsStore) Update(ctx context.Context, m *chronograf.Mapping) error {
	return s.UpdateF(ctx, m)
}

View File

@ -1,22 +0,0 @@
package mocks
import (
"context"
"github.com/influxdata/influxdb/v2/chronograf"
)
var _ chronograf.OrganizationConfigStore = &OrganizationConfigStore{}

// OrganizationConfigStore is a mock chronograf.OrganizationConfigStore;
// each method delegates to the corresponding func-valued field.
type OrganizationConfigStore struct {
	FindOrCreateF func(ctx context.Context, id string) (*chronograf.OrganizationConfig, error)
	PutF          func(ctx context.Context, c *chronograf.OrganizationConfig) error
}

// FindOrCreate delegates to FindOrCreateF.
func (s *OrganizationConfigStore) FindOrCreate(ctx context.Context, id string) (*chronograf.OrganizationConfig, error) {
	return s.FindOrCreateF(ctx, id)
}

// Put delegates to PutF.
func (s *OrganizationConfigStore) Put(ctx context.Context, c *chronograf.OrganizationConfig) error {
	return s.PutF(ctx, c)
}

Some files were not shown because too many files have changed in this diff Show More