diff --git a/CHANGELOG.md b/CHANGELOG.md index fd7285d8d4..4254d1a947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,12 +2,14 @@ ### Bug Fixes ### Features 1. [#1885](https://github.com/influxdata/chronograf/pull/1885): Add `fill` options to data explorer and dashboard queries +1. [#1978](https://github.com/influxdata/chronograf/pull/1978): Support editing kapacitor TICKScript ### UI Improvements ## v1.3.8.1 [unreleased] ### Bug Fixes ### Features + ### UI Improvements ## v1.3.8.0 [2017-09-07] diff --git a/chronograf.go b/chronograf.go index 63c83d6220..070862ed5f 100644 --- a/chronograf.go +++ b/chronograf.go @@ -441,19 +441,32 @@ type SourcesStore interface { Update(context.Context, Source) error } +type DBRP struct { + DB string `json:"db"` + RP string `json:"rp"` +} + // AlertRule represents rules for building a tickscript alerting task type AlertRule struct { - ID string `json:"id,omitempty"` // ID is the unique ID of the alert - TICKScript TICKScript `json:"tickscript"` // TICKScript is the raw tickscript associated with this Alert - Query *QueryConfig `json:"query"` // Query is the filter of data for the alert. - Every string `json:"every"` // Every how often to check for the alerting criteria - Alerts []string `json:"alerts"` // Alerts name all the services to notify (e.g. pagerduty) - AlertNodes []KapacitorNode `json:"alertNodes,omitempty"` // AlertNodes define additional arguments to alerts - Message string `json:"message"` // Message included with alert - Details string `json:"details"` // Details is generally used for the Email alert. If empty will not be added. 
- Trigger string `json:"trigger"` // Trigger is a type that defines when to trigger the alert - TriggerValues TriggerValues `json:"values"` // Defines the values that cause the alert to trigger - Name string `json:"name"` // Name is the user-defined name for the alert + ID string `json:"id,omitempty"` // ID is the unique ID of the alert + TICKScript TICKScript `json:"tickscript"` // TICKScript is the raw tickscript associated with this Alert + Query *QueryConfig `json:"query"` // Query is the filter of data for the alert. + Every string `json:"every"` // Every how often to check for the alerting criteria + Alerts []string `json:"alerts"` // Alerts name all the services to notify (e.g. pagerduty) + AlertNodes []KapacitorNode `json:"alertNodes,omitempty"` // AlertNodes define additional arguments to alerts + Message string `json:"message"` // Message included with alert + Details string `json:"details"` // Details is generally used for the Email alert. If empty will not be added. + Trigger string `json:"trigger"` // Trigger is a type that defines when to trigger the alert + TriggerValues TriggerValues `json:"values"` // Defines the values that cause the alert to trigger + Name string `json:"name"` // Name is the user-defined name for the alert + Type string `json:"type"` // Represents the task type where stream is data streamed to kapacitor and batch is queried by kapacitor + DBRPs []DBRP `json:"dbrps"` // List of database retention policy pairs the task is allowed to access + Status string `json:"status"` // Represents if this rule is enabled or disabled in kapacitor + Executing bool `json:"executing"` // Whether the task is currently executing + Error string `json:"error"` // Any error encountered when kapacitor executes the task + Created time.Time `json:"created"` // Date the task was first created + Modified time.Time `json:"modified"` // Date the task was last modified + LastEnabled time.Time `json:"last-enabled,omitempty"` // Date the task was last set to 
status enabled } // TICKScript task to be used by kapacitor diff --git a/kapacitor/client.go b/kapacitor/client.go index b7687f3d94..1e7fbf9f51 100644 --- a/kapacitor/client.go +++ b/kapacitor/client.go @@ -57,6 +57,46 @@ type Task struct { TICKScript chronograf.TICKScript // TICKScript is the running script } +// NewTask creates a task from a kapacitor client task +func NewTask(task *client.Task) *Task { + dbrps := make([]chronograf.DBRP, len(task.DBRPs)) + for i := range task.DBRPs { + dbrps[i].DB = task.DBRPs[i].Database + dbrps[i].RP = task.DBRPs[i].RetentionPolicy + } + + script := chronograf.TICKScript(task.TICKscript) + rule, err := Reverse(script) + if err != nil { + rule = chronograf.AlertRule{ + Name: task.ID, + Query: nil, + } + } + + rule.ID = task.ID + rule.TICKScript = script + rule.Type = task.Type.String() + rule.DBRPs = dbrps + rule.Status = task.Status.String() + rule.Executing = task.Executing + rule.Error = task.Error + rule.Created = task.Created + rule.Modified = task.Modified + rule.LastEnabled = task.LastEnabled + return &Task{ + ID: task.ID, + Href: task.Link.Href, + HrefOutput: HrefOutput(task.ID), + Rule: rule, + } +} + +// HrefOutput returns the link to a kapacitor task httpOut Node given an id +func HrefOutput(ID string) string { + return fmt.Sprintf("/kapacitor/v1/tasks/%s/%s", ID, HTTPEndpoint) +} + // Href returns the link to a kapacitor task given an id func (c *Client) Href(ID string) string { return fmt.Sprintf("/kapacitor/v1/tasks/%s", ID) @@ -64,16 +104,69 @@ func (c *Client) Href(ID string) string { // HrefOutput returns the link to a kapacitor task httpOut Node given an id func (c *Client) HrefOutput(ID string) string { - return fmt.Sprintf("/kapacitor/v1/tasks/%s/%s", ID, HTTPEndpoint) + return HrefOutput(ID) } // Create builds and POSTs a tickscript to kapacitor func (c *Client) Create(ctx context.Context, rule chronograf.AlertRule) (*Task, error) { + var opt *client.CreateTaskOptions + var err error + if rule.Query != nil { 
+ opt, err = c.createFromQueryConfig(rule) + } else { + opt, err = c.createFromTick(rule) + } + + if err != nil { + return nil, err + } + kapa, err := c.kapaClient(c.URL, c.Username, c.Password) if err != nil { return nil, err } + task, err := kapa.CreateTask(*opt) + if err != nil { + return nil, err + } + + return NewTask(&task), nil +} + +func (c *Client) createFromTick(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) { + dbrps := make([]client.DBRP, len(rule.DBRPs)) + for i := range rule.DBRPs { + dbrps[i] = client.DBRP{ + Database: rule.DBRPs[i].DB, + RetentionPolicy: rule.DBRPs[i].RP, + } + } + + status := client.Enabled + if rule.Status != "" { + if err := status.UnmarshalText([]byte(rule.Status)); err != nil { + return nil, err + } + } + + taskType := client.StreamTask + if rule.Type != "stream" { + if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil { + return nil, err + } + } + + return &client.CreateTaskOptions{ + ID: rule.ID, + Type: taskType, + DBRPs: dbrps, + TICKscript: string(rule.TICKScript), + Status: status, + }, nil +} + +func (c *Client) createFromQueryConfig(rule chronograf.AlertRule) (*client.CreateTaskOptions, error) { id, err := c.ID.Generate() if err != nil { return nil, err @@ -85,24 +178,12 @@ func (c *Client) Create(ctx context.Context, rule chronograf.AlertRule) (*Task, } kapaID := Prefix + id - rule.ID = kapaID - task, err := kapa.CreateTask(client.CreateTaskOptions{ + return &client.CreateTaskOptions{ ID: kapaID, Type: toTask(rule.Query), DBRPs: []client.DBRP{{Database: rule.Query.Database, RetentionPolicy: rule.Query.RetentionPolicy}}, TICKscript: string(script), Status: client.Enabled, - }) - if err != nil { - return nil, err - } - - return &Task{ - ID: kapaID, - Href: task.Link.Href, - HrefOutput: c.HrefOutput(kapaID), - TICKScript: script, - Rule: c.Reverse(kapaID, script), }, nil } @@ -130,12 +211,7 @@ func (c *Client) updateStatus(ctx context.Context, href string, status client.Ta return nil, err } - 
return &Task{ - ID: task.ID, - Href: task.Link.Href, - HrefOutput: c.HrefOutput(task.ID), - TICKScript: chronograf.TICKScript(task.TICKscript), - }, nil + return NewTask(&task), nil } // Disable changes the tickscript status to disabled for a given href. @@ -148,30 +224,6 @@ func (c *Client) Enable(ctx context.Context, href string) (*Task, error) { return c.updateStatus(ctx, href, client.Enabled) } -// AllStatus returns the status of all tasks in kapacitor -func (c *Client) AllStatus(ctx context.Context) (map[string]string, error) { - kapa, err := c.kapaClient(c.URL, c.Username, c.Password) - if err != nil { - return nil, err - } - - // Only get the status, id and link section back - opts := &client.ListTasksOptions{ - Fields: []string{"status"}, - } - tasks, err := kapa.ListTasks(opts) - if err != nil { - return nil, err - } - - taskStatuses := map[string]string{} - for _, task := range tasks { - taskStatuses[task.ID] = task.Status.String() - } - - return taskStatuses, nil -} - // Status returns the status of a task in kapacitor func (c *Client) Status(ctx context.Context, href string) (string, error) { s, err := c.status(ctx, href) @@ -196,7 +248,7 @@ func (c *Client) status(ctx context.Context, href string) (client.TaskStatus, er } // All returns all tasks in kapacitor -func (c *Client) All(ctx context.Context) (map[string]chronograf.AlertRule, error) { +func (c *Client) All(ctx context.Context) (map[string]*Task, error) { kapa, err := c.kapaClient(c.URL, c.Username, c.Password) if err != nil { return nil, err @@ -209,22 +261,11 @@ func (c *Client) All(ctx context.Context) (map[string]chronograf.AlertRule, erro return nil, err } - alerts := map[string]chronograf.AlertRule{} + all := map[string]*Task{} for _, task := range tasks { - script := chronograf.TICKScript(task.TICKscript) - if rule, err := Reverse(script); err != nil { - alerts[task.ID] = chronograf.AlertRule{ - ID: task.ID, - Name: task.ID, - TICKScript: script, - } - } else { - rule.ID = task.ID - 
rule.TICKScript = script
-			alerts[task.ID] = rule
-		}
+		all[task.ID] = NewTask(&task)
 	}
 
-	return alerts, nil
+	return all, nil
 }
 
 // Reverse builds a chronograf.AlertRule and its QueryConfig from a tickscript
@@ -244,19 +285,18 @@
 }
 
 // Get returns a single alert in kapacitor
-func (c *Client) Get(ctx context.Context, id string) (chronograf.AlertRule, error) {
+func (c *Client) Get(ctx context.Context, id string) (*Task, error) {
 	kapa, err := c.kapaClient(c.URL, c.Username, c.Password)
 	if err != nil {
-		return chronograf.AlertRule{}, err
+		return nil, err
 	}
 
 	href := c.Href(id)
 	task, err := kapa.Task(client.Link{Href: href}, nil)
 	if err != nil {
-		return chronograf.AlertRule{}, chronograf.ErrAlertNotFound
+		return nil, chronograf.ErrAlertNotFound
 	}
-	script := chronograf.TICKScript(task.TICKscript)
-	return c.Reverse(task.ID, script), nil
+	return NewTask(&task), nil
 }
 
 // Update changes the tickscript of a given id.
@@ -266,30 +306,24 @@ func (c *Client) Update(ctx context.Context, href string, rule chronograf.AlertR
 		return nil, err
 	}
 
-	script, err := c.Ticker.Generate(rule)
-	if err != nil {
-		return nil, err
-	}
-
 	prevStatus, err := c.status(ctx, href)
 	if err != nil {
 		return nil, err
 	}
 
-	// We need to disable the kapacitor task followed by enabling it during update.
-	opts := client.UpdateTaskOptions{
-		TICKscript: string(script),
-		Status:     client.Disabled,
-		Type:       toTask(rule.Query),
-		DBRPs: []client.DBRP{
-			{
-				Database:        rule.Query.Database,
-				RetentionPolicy: rule.Query.RetentionPolicy,
-			},
-		},
+	var opt *client.UpdateTaskOptions
+	if rule.Query != nil {
+		opt, err = c.updateFromQueryConfig(rule)
+	} else {
+		opt, err = c.updateFromTick(rule)
 	}
+	// opt is nil whenever updateFromQueryConfig/updateFromTick fail, so the
+	// error must be checked before *opt is dereferenced below.
+	if err != nil {
+		return nil, err
+	}
 
-	task, err := kapa.UpdateTask(client.Link{Href: href}, opts)
+	task, err := kapa.UpdateTask(client.Link{Href: href}, *opt)
 	if err != nil {
 		return nil, err
 	}
@@ -301,12 +335,51 @@ func (c *Client) Update(ctx context.Context, href string, rule chronograf.AlertR
 		}
 	}
 
-	return &Task{
-		ID:         task.ID,
-		Href:       task.Link.Href,
-		HrefOutput: c.HrefOutput(task.ID),
-		TICKScript: script,
-		Rule:       c.Reverse(task.ID, script),
+	return NewTask(&task), nil
+}
+
+func (c *Client) updateFromQueryConfig(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) {
+	script, err := c.Ticker.Generate(rule)
+	if err != nil {
+		return nil, err
+	}
+
+	// We need to disable the kapacitor task followed by enabling it during update.
+	return &client.UpdateTaskOptions{
+		TICKscript: string(script),
+		Status:     client.Disabled,
+		Type:       toTask(rule.Query),
+		DBRPs: []client.DBRP{
+			{
+				Database:        rule.Query.Database,
+				RetentionPolicy: rule.Query.RetentionPolicy,
+			},
+		},
+	}, nil
+}
+
+func (c *Client) updateFromTick(rule chronograf.AlertRule) (*client.UpdateTaskOptions, error) {
+	dbrps := make([]client.DBRP, len(rule.DBRPs))
+	for i := range rule.DBRPs {
+		dbrps[i] = client.DBRP{
+			Database:        rule.DBRPs[i].DB,
+			RetentionPolicy: rule.DBRPs[i].RP,
+		}
+	}
+
+	taskType := client.StreamTask
+	if rule.Type != "stream" {
+		if err := taskType.UnmarshalText([]byte(rule.Type)); err != nil {
+			return nil, err
+		}
+	}
+
+	// We need to disable the kapacitor task followed by enabling it during update.
+ return &client.UpdateTaskOptions{ + TICKscript: string(rule.TICKScript), + Status: client.Disabled, + Type: taskType, + DBRPs: dbrps, }, nil } diff --git a/kapacitor/client_test.go b/kapacitor/client_test.go index d3850a4e71..1932b1a6c9 100644 --- a/kapacitor/client_test.go +++ b/kapacitor/client_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + "github.com/google/go-cmp/cmp" "github.com/influxdata/chronograf" client "github.com/influxdata/kapacitor/client/v1" ) @@ -66,132 +67,6 @@ type MockID struct { func (m *MockID) Generate() (string, error) { return m.ID, nil } -func TestClient_AllStatus(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string) (KapaClient, error) - } - type args struct { - ctx context.Context - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - want map[string]string - wantErr bool - resTask client.Task - resTasks []client.Task - resError error - - createTaskOptions client.CreateTaskOptions - link client.Link - taskOptions *client.TaskOptions - listTasksOptions *client.ListTasksOptions - updateTaskOptions client.UpdateTaskOptions - }{ - { - name: "return no tasks", - fields: fields{ - URL: "http://hill-valley-preservation-society.org", - Username: "ElsaRaven", - Password: "save the clock tower", - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: &client.ListTasksOptions{ - Fields: []string{"status"}, - }, - want: map[string]string{}, - }, - { - name: "return two tasks", - fields: fields{ - URL: "http://hill-valley-preservation-society.org", - Username: "ElsaRaven", - Password: "save the clock tower", - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: &client.ListTasksOptions{ - Fields: []string{"status"}, - }, - resTasks: 
[]client.Task{ - client.Task{ - ID: "howdy", - Status: client.Enabled, - }, - client.Task{ - ID: "doody", - Status: client.Disabled, - }, - }, - want: map[string]string{ - "howdy": "enabled", - "doody": "disabled", - }, - }, - { - name: "list task error", - fields: fields{ - URL: "http://hill-valley-preservation-society.org", - Username: "ElsaRaven", - Password: "save the clock tower", - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - listTasksOptions: &client.ListTasksOptions{ - Fields: []string{"status"}, - }, - resError: fmt.Errorf("this is an error"), - wantErr: true, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.ResTasks = tt.resTasks - kapa.ListError = tt.resError - - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.AllStatus(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("Client.AllStatus() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Client.AllStatus() = %v, want %v", got, tt.want) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.AllStatus() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.TaskOptions, tt.taskOptions) { - t.Errorf("Client.AllStatus() = taskOptions %v, want %v", kapa.TaskOptions, tt.taskOptions) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.AllStatus() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.Link, tt.link) { - t.Errorf("Client.AllStatus() = Link %v, want %v", kapa.Link, tt.link) - } - }) - } -} func TestClient_All(t *testing.T) { type fields struct { @@ -210,7 +85,7 @@ func 
TestClient_All(t *testing.T) { name string fields fields args args - want map[string]chronograf.AlertRule + want map[string]*Task wantErr bool resTask client.Task resTasks []client.Task @@ -230,7 +105,7 @@ func TestClient_All(t *testing.T) { }, }, listTasksOptions: &client.ListTasksOptions{}, - want: map[string]chronograf.AlertRule{}, + want: map[string]*Task{}, }, { name: "return a non-reversible task", @@ -246,10 +121,19 @@ func TestClient_All(t *testing.T) { Status: client.Enabled, }, }, - want: map[string]chronograf.AlertRule{ - "howdy": chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", + want: map[string]*Task{ + "howdy": &Task{ + ID: "howdy", + + HrefOutput: "/kapacitor/v1/tasks/howdy/output", + Rule: chronograf.AlertRule{ + ID: "howdy", + Name: "howdy", + TICKScript: "", + Type: "unknown TaskType 0", + Status: "enabled", + DBRPs: []chronograf.DBRP{}, + }, TICKScript: "", }, }, @@ -266,6 +150,13 @@ func TestClient_All(t *testing.T) { client.Task{ ID: "rule 1", Status: client.Enabled, + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "_internal", + RetentionPolicy: "autogen", + }, + }, TICKscript: `var db = '_internal' var rp = 'monitor' @@ -335,8 +226,344 @@ trigger `, }, }, - want: map[string]chronograf.AlertRule{ - "rule 1": chronograf.AlertRule{ + want: map[string]*Task{ + "rule 1": &Task{ + ID: "rule 1", + + HrefOutput: "/kapacitor/v1/tasks/rule 1/output", + Rule: chronograf.AlertRule{ + DBRPs: []chronograf.DBRP{ + { + + DB: "_internal", + RP: "autogen", + }, + }, + Type: "stream", + Status: "enabled", + ID: "rule 1", + Name: "rule 1", + TICKScript: `var db = '_internal' + +var rp = 'monitor' + +var measurement = 'cq' + +var groupBy = [] + +var whereFilter = lambda: TRUE + +var name = 'rule 1' + +var idVar = name + ':{{.Group}}' + +var message = '' + +var idTag = 'alertID' + +var levelTag = 'level' + +var messageField = 'message' + +var durationField = 'duration' + +var outputDB = 'chronograf' + +var outputRP = 'autogen' + +var 
outputMeasurement = 'alerts' + +var triggerType = 'threshold' + +var crit = 90000 + +var data = stream + |from() + .database(db) + .retentionPolicy(rp) + .measurement(measurement) + .groupBy(groupBy) + .where(whereFilter) + |eval(lambda: "queryOk") + .as('value') + +var trigger = data + |alert() + .crit(lambda: "value" > crit) + .stateChangesOnly() + .message(message) + .id(idVar) + .idTag(idTag) + .levelTag(levelTag) + .messageField(messageField) + .durationField(durationField) + +trigger + |influxDBOut() + .create() + .database(outputDB) + .retentionPolicy(outputRP) + .measurement(outputMeasurement) + .tag('alertName', name) + .tag('triggerType', triggerType) + +trigger + |httpOut('output') +`, + Trigger: "threshold", + Alerts: []string{}, + TriggerValues: chronograf.TriggerValues{ + Operator: "greater than", + Value: "90000", + }, + Query: &chronograf.QueryConfig{ + Database: "_internal", + RetentionPolicy: "monitor", + Measurement: "cq", + Fields: []chronograf.Field{ + { + Field: "queryOk", + Funcs: []string{}, + }, + }, + GroupBy: chronograf.GroupBy{ + Tags: []string{}, + }, + AreTagsAccepted: false, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + kapa.ResTask = tt.resTask + kapa.ResTasks = tt.resTasks + kapa.ListError = tt.resError + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + URL: tt.fields.URL, + Username: tt.fields.Username, + Password: tt.fields.Password, + ID: tt.fields.ID, + Ticker: tt.fields.Ticker, + kapaClient: tt.fields.kapaClient, + } + got, err := c.All(tt.args.ctx) + if (err != nil) != tt.wantErr { + t.Errorf("Client.All() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !cmp.Equal(got, tt.want) { + t.Errorf("%q. 
Client.All() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) + } + if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { + t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) + } + if !reflect.DeepEqual(kapa.TaskOptions, tt.taskOptions) { + t.Errorf("Client.All() = taskOptions %v, want %v", kapa.TaskOptions, tt.taskOptions) + } + if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { + t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) + } + if !reflect.DeepEqual(kapa.Link, tt.link) { + t.Errorf("Client.All() = Link %v, want %v", kapa.Link, tt.link) + } + }) + } +} + +func TestClient_Get(t *testing.T) { + type fields struct { + URL string + Username string + Password string + ID chronograf.ID + Ticker chronograf.Ticker + kapaClient func(url, username, password string) (KapaClient, error) + } + type args struct { + ctx context.Context + id string + } + kapa := &MockKapa{} + tests := []struct { + name string + fields fields + args args + want *Task + wantErr bool + resTask client.Task + resTasks []client.Task + resError error + + createTaskOptions client.CreateTaskOptions + link client.Link + taskOptions *client.TaskOptions + listTasksOptions *client.ListTasksOptions + updateTaskOptions client.UpdateTaskOptions + }{ + { + name: "return no task", + fields: fields{ + kapaClient: func(url, username, password string) (KapaClient, error) { + return kapa, nil + }, + }, + args: args{ + id: "myid", + }, + taskOptions: nil, + wantErr: true, + resError: fmt.Errorf("No such task"), + link: client.Link{ + Href: "/kapacitor/v1/tasks/myid", + }, + }, + { + name: "return non-reversible task", + fields: fields{ + kapaClient: func(url, username, password string) (KapaClient, error) { + return kapa, nil + }, + }, + args: args{ + id: "myid", + }, + taskOptions: nil, + resTask: client.Task{ + ID: "myid", + Status: client.Enabled, + Type: client.StreamTask, + 
DBRPs: []client.DBRP{ + { + Database: "_internal", + RetentionPolicy: "autogen", + }, + }, + }, + want: &Task{ + ID: "myid", + HrefOutput: "/kapacitor/v1/tasks/myid/output", + Rule: chronograf.AlertRule{ + Type: "stream", + Status: "enabled", + ID: "myid", + Name: "myid", + DBRPs: []chronograf.DBRP{ + { + DB: "_internal", + RP: "autogen", + }, + }, + }, + }, + link: client.Link{ + Href: "/kapacitor/v1/tasks/myid", + }, + }, + { + name: "return reversible task", + fields: fields{ + kapaClient: func(url, username, password string) (KapaClient, error) { + return kapa, nil + }, + }, + args: args{ + id: "rule 1", + }, + taskOptions: nil, + resTask: client.Task{ + ID: "rule 1", + Status: client.Enabled, + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "_internal", + RetentionPolicy: "autogen", + }, + }, + TICKscript: `var db = '_internal' + +var rp = 'monitor' + +var measurement = 'cq' + +var groupBy = [] + +var whereFilter = lambda: TRUE + +var name = 'rule 1' + +var idVar = name + ':{{.Group}}' + +var message = '' + +var idTag = 'alertID' + +var levelTag = 'level' + +var messageField = 'message' + +var durationField = 'duration' + +var outputDB = 'chronograf' + +var outputRP = 'autogen' + +var outputMeasurement = 'alerts' + +var triggerType = 'threshold' + +var crit = 90000 + +var data = stream + |from() + .database(db) + .retentionPolicy(rp) + .measurement(measurement) + .groupBy(groupBy) + .where(whereFilter) + |eval(lambda: "queryOk") + .as('value') + +var trigger = data + |alert() + .crit(lambda: "value" > crit) + .stateChangesOnly() + .message(message) + .id(idVar) + .idTag(idTag) + .levelTag(levelTag) + .messageField(messageField) + .durationField(durationField) + +trigger + |influxDBOut() + .create() + .database(outputDB) + .retentionPolicy(outputRP) + .measurement(outputMeasurement) + .tag('alertName', name) + .tag('triggerType', triggerType) + +trigger + |httpOut('output') +`, + }, + want: &Task{ + ID: "rule 1", + HrefOutput: 
"/kapacitor/v1/tasks/rule 1/output", + Rule: chronograf.AlertRule{ + Type: "stream", + Status: "enabled", + DBRPs: []chronograf.DBRP{ + { + + DB: "_internal", + RP: "autogen", + }, + }, ID: "rule 1", Name: "rule 1", TICKScript: `var db = '_internal' @@ -429,287 +656,6 @@ trigger }, }, }, - }, - } - for _, tt := range tests { - kapa.ResTask = tt.resTask - kapa.ResTasks = tt.resTasks - kapa.ListError = tt.resError - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - URL: tt.fields.URL, - Username: tt.fields.Username, - Password: tt.fields.Password, - ID: tt.fields.ID, - Ticker: tt.fields.Ticker, - kapaClient: tt.fields.kapaClient, - } - got, err := c.All(tt.args.ctx) - if (err != nil) != tt.wantErr { - t.Errorf("Client.All() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Client.All() = %#v, want %#v", got, tt.want) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.TaskOptions, tt.taskOptions) { - t.Errorf("Client.All() = taskOptions %v, want %v", kapa.TaskOptions, tt.taskOptions) - } - if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { - t.Errorf("Client.All() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) - } - if !reflect.DeepEqual(kapa.Link, tt.link) { - t.Errorf("Client.All() = Link %v, want %v", kapa.Link, tt.link) - } - }) - } -} - -func TestClient_Get(t *testing.T) { - type fields struct { - URL string - Username string - Password string - ID chronograf.ID - Ticker chronograf.Ticker - kapaClient func(url, username, password string) (KapaClient, error) - } - type args struct { - ctx context.Context - id string - } - kapa := &MockKapa{} - tests := []struct { - name string - fields fields - args args - want chronograf.AlertRule - wantErr bool - resTask client.Task - resTasks []client.Task - resError 
error - - createTaskOptions client.CreateTaskOptions - link client.Link - taskOptions *client.TaskOptions - listTasksOptions *client.ListTasksOptions - updateTaskOptions client.UpdateTaskOptions - }{ - { - name: "return no task", - fields: fields{ - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "myid", - }, - taskOptions: nil, - wantErr: true, - resError: fmt.Errorf("No such task"), - link: client.Link{ - Href: "/kapacitor/v1/tasks/myid", - }, - }, - { - name: "return non-reversible task", - fields: fields{ - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "myid", - }, - taskOptions: nil, - resTask: client.Task{ - ID: "myid", - }, - want: chronograf.AlertRule{ - ID: "myid", - Name: "myid", - }, - link: client.Link{ - Href: "/kapacitor/v1/tasks/myid", - }, - }, - { - name: "return reversible task", - fields: fields{ - kapaClient: func(url, username, password string) (KapaClient, error) { - return kapa, nil - }, - }, - args: args{ - id: "rule 1", - }, - taskOptions: nil, - resTask: client.Task{ - ID: "rule 1", - TICKscript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - 
.messageField(messageField) - .durationField(durationField) - -trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - }, - want: chronograf.AlertRule{ - ID: "rule 1", - Name: "rule 1", - TICKScript: `var db = '_internal' - -var rp = 'monitor' - -var measurement = 'cq' - -var groupBy = [] - -var whereFilter = lambda: TRUE - -var name = 'rule 1' - -var idVar = name + ':{{.Group}}' - -var message = '' - -var idTag = 'alertID' - -var levelTag = 'level' - -var messageField = 'message' - -var durationField = 'duration' - -var outputDB = 'chronograf' - -var outputRP = 'autogen' - -var outputMeasurement = 'alerts' - -var triggerType = 'threshold' - -var crit = 90000 - -var data = stream - |from() - .database(db) - .retentionPolicy(rp) - .measurement(measurement) - .groupBy(groupBy) - .where(whereFilter) - |eval(lambda: "queryOk") - .as('value') - -var trigger = data - |alert() - .crit(lambda: "value" > crit) - .stateChangesOnly() - .message(message) - .id(idVar) - .idTag(idTag) - .levelTag(levelTag) - .messageField(messageField) - .durationField(durationField) - -trigger - |influxDBOut() - .create() - .database(outputDB) - .retentionPolicy(outputRP) - .measurement(outputMeasurement) - .tag('alertName', name) - .tag('triggerType', triggerType) - -trigger - |httpOut('output') -`, - Trigger: "threshold", - Alerts: []string{}, - TriggerValues: chronograf.TriggerValues{ - Operator: "greater than", - Value: "90000", - }, - Query: &chronograf.QueryConfig{ - Database: "_internal", - RetentionPolicy: "monitor", - Measurement: "cq", - Fields: []chronograf.Field{ - { - Field: "queryOk", - Funcs: []string{}, - }, - }, - GroupBy: chronograf.GroupBy{ - Tags: []string{}, - }, - AreTagsAccepted: false, - }, - }, link: client.Link{ Href: "/kapacitor/v1/tasks/rule 1", }, @@ -733,8 +679,9 @@ trigger t.Errorf("Client.Get() error 
= %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Client.Get() =\n%#v\nwant\n%#v", got, tt.want) + + if !cmp.Equal(got, tt.want) { + t.Errorf("%q. Client.All() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) } if !reflect.DeepEqual(kapa.ListTasksOptions, tt.listTasksOptions) { t.Errorf("Client.Get() = listTasksOptions %v, want %v", kapa.ListTasksOptions, tt.listTasksOptions) @@ -793,6 +740,13 @@ func TestClient_updateStatus(t *testing.T) { resTask: client.Task{ ID: "howdy", Status: client.Disabled, + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "db", + RetentionPolicy: "rp", + }, + }, Link: client.Link{ Href: "/kapacitor/v1/tasks/howdy", }, @@ -805,7 +759,19 @@ func TestClient_updateStatus(t *testing.T) { ID: "howdy", Href: "/kapacitor/v1/tasks/howdy", HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{}, + Rule: chronograf.AlertRule{ + ID: "howdy", + Name: "howdy", + Type: "stream", + DBRPs: []chronograf.DBRP{ + { + + DB: "db", + RP: "rp", + }, + }, + Status: "disabled", + }, }, }, { @@ -842,7 +808,14 @@ func TestClient_updateStatus(t *testing.T) { status: client.Enabled, }, resTask: client.Task{ - ID: "howdy", + ID: "howdy", + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "db", + RetentionPolicy: "rp", + }, + }, Status: client.Enabled, Link: client.Link{ Href: "/kapacitor/v1/tasks/howdy", @@ -856,7 +829,19 @@ func TestClient_updateStatus(t *testing.T) { ID: "howdy", Href: "/kapacitor/v1/tasks/howdy", HrefOutput: "/kapacitor/v1/tasks/howdy/output", - Rule: chronograf.AlertRule{}, + Rule: chronograf.AlertRule{ + ID: "howdy", + Name: "howdy", + Type: "stream", + DBRPs: []chronograf.DBRP{ + { + + DB: "db", + RP: "rp", + }, + }, + Status: "enabled", + }, }, }, } @@ -878,8 +863,8 @@ func TestClient_updateStatus(t *testing.T) { t.Errorf("Client.updateStatus() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - 
t.Errorf("Client.updateStatus() = %v, want %v", got, tt.want) + if !cmp.Equal(got, tt.want) { + t.Errorf("%q. Client.updateStatus() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) } if !reflect.DeepEqual(kapa.UpdateTaskOptions, tt.updateTaskOptions) { t.Errorf("Client.updateStatus() = %v, want %v", kapa.UpdateTaskOptions, tt.updateTaskOptions) @@ -968,7 +953,14 @@ func TestClient_Update(t *testing.T) { }, }, resTask: client.Task{ - ID: "howdy", + ID: "howdy", + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "db", + RetentionPolicy: "rp", + }, + }, Status: client.Enabled, Link: client.Link{ Href: "/kapacitor/v1/tasks/howdy", @@ -990,8 +982,17 @@ func TestClient_Update(t *testing.T) { Href: "/kapacitor/v1/tasks/howdy", HrefOutput: "/kapacitor/v1/tasks/howdy/output", Rule: chronograf.AlertRule{ - ID: "howdy", - Name: "howdy", + DBRPs: []chronograf.DBRP{ + { + + DB: "db", + RP: "rp", + }, + }, + Status: "enabled", + Type: "stream", + ID: "howdy", + Name: "howdy", }, }, wantStatus: client.Enabled, @@ -1016,7 +1017,14 @@ func TestClient_Update(t *testing.T) { }, }, resTask: client.Task{ - ID: "howdy", + ID: "howdy", + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "db", + RetentionPolicy: "rp", + }, + }, Status: client.Disabled, Link: client.Link{ Href: "/kapacitor/v1/tasks/howdy", @@ -1040,6 +1048,15 @@ func TestClient_Update(t *testing.T) { Rule: chronograf.AlertRule{ ID: "howdy", Name: "howdy", + DBRPs: []chronograf.DBRP{ + { + + DB: "db", + RP: "rp", + }, + }, + Status: "disabled", + Type: "stream", }, }, wantStatus: client.Disabled, @@ -1062,8 +1079,8 @@ func TestClient_Update(t *testing.T) { t.Errorf("Client.Update() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Client.Update() =\n%#+v\n, want\n%#+v\n", got, tt.want) + if !cmp.Equal(got, tt.want) { + t.Errorf("%q. 
Client.Update() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) } if !reflect.DeepEqual(kapa.UpdateTaskOptions, tt.updateTaskOptions) { t.Errorf("Client.Update() = %v, want %v", kapa.UpdateTaskOptions, tt.updateTaskOptions) @@ -1123,6 +1140,13 @@ func TestClient_Create(t *testing.T) { resTask: client.Task{ ID: "chronograf-v1-howdy", Status: client.Enabled, + Type: client.StreamTask, + DBRPs: []client.DBRP{ + { + Database: "db", + RetentionPolicy: "rp", + }, + }, Link: client.Link{ Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", }, @@ -1144,13 +1168,22 @@ func TestClient_Create(t *testing.T) { Href: "/kapacitor/v1/tasks/chronograf-v1-howdy", HrefOutput: "/kapacitor/v1/tasks/chronograf-v1-howdy/output", Rule: chronograf.AlertRule{ - ID: "chronograf-v1-howdy", - Name: "chronograf-v1-howdy", + Type: "stream", + DBRPs: []chronograf.DBRP{ + { + + DB: "db", + RP: "rp", + }, + }, + Status: "enabled", + ID: "chronograf-v1-howdy", + Name: "chronograf-v1-howdy", }, }, }, { - name: "create alert rule", + name: "create alert rule error", fields: fields{ kapaClient: func(url, username, password string) (KapaClient, error) { return kapa, nil @@ -1203,8 +1236,8 @@ func TestClient_Create(t *testing.T) { t.Errorf("Client.Create() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Client.Create() =\n%v\n, want\n%v\n", got, tt.want) + if !cmp.Equal(got, tt.want) { + t.Errorf("%q. 
Client.Create() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) } if !reflect.DeepEqual(kapa.CreateTaskOptions, tt.createTaskOptions) { t.Errorf("Client.Create() = %v, want %v", kapa.CreateTaskOptions, tt.createTaskOptions) diff --git a/server/kapacitors.go b/server/kapacitors.go index 488d6f586c..a0a838cb31 100644 --- a/server/kapacitors.go +++ b/server/kapacitors.go @@ -319,7 +319,7 @@ func (h *Service) KapacitorRulesPost(w http.ResponseWriter, r *http.Request) { Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } - res := newAlertResponse(task.Rule, task.TICKScript, task.Href, task.HrefOutput, "enabled", srv.SrcID, srv.ID) + res := newAlertResponse(task, srv.SrcID, srv.ID) w.Header().Add("Location", res.Links.Self) encodeJSON(w, http.StatusCreated, res, h.Logger) } @@ -332,22 +332,18 @@ type alertLinks struct { type alertResponse struct { chronograf.AlertRule - TICKScript string `json:"tickscript"` - Status string `json:"status"` - Links alertLinks `json:"links"` + Links alertLinks `json:"links"` } // newAlertResponse formats task into an alertResponse -func newAlertResponse(rule chronograf.AlertRule, tickScript chronograf.TICKScript, href, hrefOutput string, status string, srcID, kapaID int) alertResponse { - res := alertResponse{ - AlertRule: rule, +func newAlertResponse(task *kapa.Task, srcID, kapaID int) *alertResponse { + res := &alertResponse{ + AlertRule: task.Rule, Links: alertLinks{ - Self: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/rules/%s", srcID, kapaID, rule.ID), - Kapacitor: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(href)), - Output: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(hrefOutput)), + Self: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/rules/%s", srcID, kapaID, task.ID), + Kapacitor: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, 
url.QueryEscape(task.Href)), + Output: fmt.Sprintf("/chronograf/v1/sources/%d/kapacitors/%d/proxy?path=%s", srcID, kapaID, url.QueryEscape(task.HrefOutput)), }, - TICKScript: string(tickScript), - Status: status, } if res.Alerts == nil { @@ -471,7 +467,7 @@ func (h *Service) KapacitorRulesPut(w http.ResponseWriter, r *http.Request) { Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } - res := newAlertResponse(task.Rule, task.TICKScript, task.Href, task.HrefOutput, "enabled", srv.SrcID, srv.ID) + res := newAlertResponse(task, srv.SrcID, srv.ID) encodeJSON(w, http.StatusOK, res, h.Logger) } @@ -523,7 +519,7 @@ func (h *Service) KapacitorRulesStatus(w http.ResponseWriter, r *http.Request) { } // Check if the rule exists and is scoped correctly - alert, err := c.Get(ctx, tid) + _, err = c.Get(ctx, tid) if err != nil { if err == chronograf.ErrAlertNotFound { notFound(w, id, h.Logger) @@ -545,7 +541,7 @@ func (h *Service) KapacitorRulesStatus(w http.ResponseWriter, r *http.Request) { return } - res := newAlertResponse(alert, task.TICKScript, task.Href, task.HrefOutput, req.Status, srv.SrcID, srv.ID) + res := newAlertResponse(task, srv.SrcID, srv.ID) encodeJSON(w, http.StatusOK, res, h.Logger) } @@ -571,35 +567,24 @@ func (h *Service) KapacitorRulesGet(w http.ResponseWriter, r *http.Request) { } c := kapa.NewClient(srv.URL, srv.Username, srv.Password) - rules, err := c.All(ctx) - if err != nil { - Error(w, http.StatusInternalServerError, err.Error(), h.Logger) - return - } - statuses, err := c.AllStatus(ctx) + tasks, err := c.All(ctx) if err != nil { Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } res := allAlertsResponse{ - Rules: []alertResponse{}, + Rules: []*alertResponse{}, } - for _, rule := range rules { - status, ok := statuses[rule.ID] - // The defined rule is not actually in kapacitor - if !ok { - continue - } - - ar := newAlertResponse(rule, rule.TICKScript, c.Href(rule.ID), c.HrefOutput(rule.ID), status, 
srv.SrcID, srv.ID) + for _, task := range tasks { + ar := newAlertResponse(task, srv.SrcID, srv.ID) res.Rules = append(res.Rules, ar) } encodeJSON(w, http.StatusOK, res, h.Logger) } type allAlertsResponse struct { - Rules []alertResponse `json:"rules"` + Rules []*alertResponse `json:"rules"` } // KapacitorRulesID retrieves specific task @@ -627,7 +612,7 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { c := kapa.NewClient(srv.URL, srv.Username, srv.Password) // Check if the rule exists within scope - rule, err := c.Get(ctx, tid) + task, err := c.Get(ctx, tid) if err != nil { if err == chronograf.ErrAlertNotFound { notFound(w, id, h.Logger) @@ -636,13 +621,8 @@ func (h *Service) KapacitorRulesID(w http.ResponseWriter, r *http.Request) { Error(w, http.StatusInternalServerError, err.Error(), h.Logger) return } - status, err := c.Status(ctx, c.Href(rule.ID)) - if err != nil { - Error(w, http.StatusInternalServerError, err.Error(), h.Logger) - return - } - res := newAlertResponse(rule, rule.TICKScript, c.Href(rule.ID), c.HrefOutput(rule.ID), status, srv.SrcID, srv.ID) + res := newAlertResponse(task, srv.SrcID, srv.ID) encodeJSON(w, http.StatusOK, res, h.Logger) } diff --git a/server/kapacitors_test.go b/server/kapacitors_test.go index 8aa7328deb..2a55ea637c 100644 --- a/server/kapacitors_test.go +++ b/server/kapacitors_test.go @@ -86,15 +86,23 @@ func Test_KapacitorRulesGet(t *testing.T) { "/chronograf/v1/sources/1/kapacitors/1/rules", []chronograf.AlertRule{ { - ID: "cpu_alert", - Name: "cpu_alert", + ID: "cpu_alert", + Name: "cpu_alert", + Status: "enabled", + Type: "stream", + DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}}, + TICKScript: tickScript, }, }, []chronograf.AlertRule{ { - ID: "cpu_alert", - Name: "cpu_alert", - Alerts: []string{}, + ID: "cpu_alert", + Name: "cpu_alert", + Status: "enabled", + Type: "stream", + DBRPs: []chronograf.DBRP{{DB: "telegraf", RP: "autogen"}}, + Alerts: []string{}, + TICKScript: tickScript, 
}, }, }, @@ -125,6 +133,13 @@ func Test_KapacitorRulesGet(t *testing.T) { "id": task.ID, "script": tickScript, "status": "enabled", + "type": "stream", + "dbrps": []chronograf.DBRP{ + { + DB: "telegraf", + RP: "autogen", + }, + }, "link": map[string]interface{}{ "rel": "self", "href": "/kapacitor/v1/tasks/cpu_alert", @@ -196,9 +211,7 @@ func Test_KapacitorRulesGet(t *testing.T) { frame := struct { Rules []struct { chronograf.AlertRule - TICKScript json.RawMessage `json:"tickscript"` - Status json.RawMessage `json:"status"` - Links json.RawMessage `json:"links"` + Links json.RawMessage `json:"links"` } `json:"rules"` }{} diff --git a/server/swagger.json b/server/swagger.json index 79293669df..baed5e911f 100644 --- a/server/swagger.json +++ b/server/swagger.json @@ -71,7 +71,8 @@ "Location": { "type": "string", "format": "url", - "description": "Location of the newly created data source resource." + "description": + "Location of the newly created data source resource." } }, "schema": { @@ -103,7 +104,8 @@ "description": "These data sources store time series data.", "responses": { "200": { - "description": "Data source used to supply time series information.", + "description": + "Data source used to supply time series information.", "schema": { "$ref": "#/definitions/Source" } @@ -151,7 +153,8 @@ } }, "404": { - "description": "Happens when trying to access a non-existent data source.", + "description": + "Happens when trying to access a non-existent data source.", "schema": { "$ref": "#/definitions/Error" } @@ -175,7 +178,8 @@ "required": true } ], - "summary": "This specific data source will be removed from the data store. All associated kapacitor resources and kapacitor rules resources are also removed.", + "summary": + "This specific data source will be removed from the data store. 
All associated kapacitor resources and kapacitor rules resources are also removed.", "responses": { "204": { "description": "data source has been removed" @@ -242,7 +246,8 @@ "/sources/{id}/proxy": { "post": { "tags": ["sources", "proxy"], - "description": "Query the backend time series data source and return the response according to `format`", + "description": + "Query the backend time series data source and return the response according to `format`", "parameters": [ { "name": "id", @@ -263,13 +268,15 @@ ], "responses": { "200": { - "description": "Result of the query from the backend time series data source.", + "description": + "Result of the query from the backend time series data source.", "schema": { "$ref": "#/definitions/ProxyResponse" } }, "400": { - "description": "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", + "description": + "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", "schema": { "$ref": "#/definitions/Error" } @@ -327,20 +334,23 @@ { "name": "rp", "in": "query", - "description": "Sets the target retention policy for the write. InfluxDB writes to the DEFAULT retention policy if you do not specify a retention policy.", + "description": + "Sets the target retention policy for the write. InfluxDB writes to the DEFAULT retention policy if you do not specify a retention policy.", "type": "string" }, { "name": "precision", "in": "query", - "description": "Sets the precision for the supplied Unix time values. InfluxDB assumes that timestamps are in nanoseconds if you do not specify precision.", + "description": + "Sets the precision for the supplied Unix time values. 
InfluxDB assumes that timestamps are in nanoseconds if you do not specify precision.", "type": "string", "enum": ["ns", "u", "ms", "s", "m", "h"] }, { "name": "consistency", "in": "query", - "description": "Sets the write consistency for the point. InfluxDB assumes that the write consistency is one if you do not specify consistency. See the InfluxEnterprise documentation for detailed descriptions of each consistency option.", + "description": + "Sets the write consistency for the point. InfluxDB assumes that the write consistency is one if you do not specify consistency. See the InfluxEnterprise documentation for detailed descriptions of each consistency option.", "type": "string", "enum": ["any", "one", "quorum", "all"] } @@ -350,7 +360,8 @@ "description": "Points written successfuly to database." }, "400": { - "description": "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", + "description": + "Any query that results in a data source error (syntax error, etc) will cause this response. The error message will be passed back in the body", "schema": { "$ref": "#/definitions/Error" } @@ -572,7 +583,8 @@ } }, "404": { - "description": "Happens when trying to access a non-existent user or source.", + "description": + "Happens when trying to access a non-existent user or source.", "schema": { "$ref": "#/definitions/Error" } @@ -626,7 +638,8 @@ "/sources/{id}/roles": { "get": { "tags": ["sources", "users", "roles"], - "summary": "Retrieve all data sources roles. Available only in Influx Enterprise", + "summary": + "Retrieve all data sources roles. 
Available only in Influx Enterprise", "parameters": [ { "name": "id", @@ -784,7 +797,8 @@ } }, "404": { - "description": "Happens when trying to access a non-existent role or source.", + "description": + "Happens when trying to access a non-existent role or source.", "schema": { "$ref": "#/definitions/Error" } @@ -1199,7 +1213,8 @@ "Location": { "type": "string", "format": "url", - "description": "Location of the newly created kapacitor resource." + "description": + "Location of the newly created kapacitor resource." } }, "schema": { @@ -1293,7 +1308,8 @@ } }, "404": { - "description": "Happens when trying to access a non-existent data source or kapacitor.", + "description": + "Happens when trying to access a non-existent data source or kapacitor.", "schema": { "$ref": "#/definitions/Error" } @@ -1325,7 +1341,8 @@ } ], "summary": "Remove Kapacitor backend", - "description": "This specific kapacitor will be removed. All associated rule resources will also be removed from the store.", + "description": + "This specific kapacitor will be removed. All associated rule resources will also be removed from the store.", "responses": { "204": { "description": "kapacitor has been removed." @@ -1367,7 +1384,8 @@ ], "responses": { "200": { - "description": "All alert rules for this specific kapacitor are returned", + "description": + "All alert rules for this specific kapacitor are returned", "schema": { "$ref": "#/definitions/Rules" } @@ -1421,7 +1439,8 @@ "Location": { "type": "string", "format": "url", - "description": "Location of the newly created kapacitor rule resource." + "description": + "Location of the newly created kapacitor rule resource." 
} }, "schema": { @@ -1435,13 +1454,15 @@ } }, "422": { - "description": "Source ID , Kapacitor ID or alert are unprocessable", + "description": + "Source ID , Kapacitor ID or alert are unprocessable", "schema": { "$ref": "#/definitions/Error" } }, "default": { - "description": "Internal server error; generally a problem creating alert in kapacitor", + "description": + "Internal server error; generally a problem creating alert in kapacitor", "schema": { "$ref": "#/definitions/Error" } @@ -1541,7 +1562,8 @@ } }, "404": { - "description": "Happens when trying to access a non-existent data source, kapacitor, or rule.", + "description": + "Happens when trying to access a non-existent data source, kapacitor, or rule.", "schema": { "$ref": "#/definitions/Error" } @@ -1602,7 +1624,8 @@ "/sources/{id}/kapacitors/{kapa_id}/proxy": { "get": { "tags": ["sources", "kapacitors", "proxy"], - "description": "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.", + "description": + "GET to `path` of kapacitor. The response and status code from kapacitor is directly returned.", "parameters": [ { "name": "id", @@ -1622,7 +1645,8 @@ "name": "path", "in": "query", "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", + "description": + "The kapacitor API path to use in the proxy redirect", "required": true } ], @@ -1646,7 +1670,8 @@ }, "delete": { "tags": ["sources", "kapacitors", "proxy"], - "description": "DELETE to `path` of kapacitor. The response and status code from kapacitor is directly returned.", + "description": + "DELETE to `path` of kapacitor. 
The response and status code from kapacitor is directly returned.", "parameters": [ { "name": "id", @@ -1666,7 +1691,8 @@ "name": "path", "in": "query", "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", + "description": + "The kapacitor API path to use in the proxy redirect", "required": true } ], @@ -1690,7 +1716,8 @@ }, "patch": { "tags": ["sources", "kapacitors", "proxy"], - "description": "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", + "description": + "PATCH body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", "parameters": [ { "name": "id", @@ -1710,7 +1737,8 @@ "name": "path", "in": "query", "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", + "description": + "The kapacitor API path to use in the proxy redirect", "required": true }, { @@ -1743,7 +1771,8 @@ }, "post": { "tags": ["sources", "kapacitors", "proxy"], - "description": "POST body directly to configured kapacitor. The response and status code from kapacitor is directly returned.", + "description": + "POST body directly to configured kapacitor. 
The response and status code from kapacitor is directly returned.", "parameters": [ { "name": "id", @@ -1763,7 +1792,8 @@ "name": "path", "in": "query", "type": "string", - "description": "The kapacitor API path to use in the proxy redirect", + "description": + "The kapacitor API path to use in the proxy redirect", "required": true }, { @@ -1799,7 +1829,8 @@ "get": { "tags": ["layouts", "mappings"], "summary": "Mappings between app names and measurements", - "description": "Mappings provide a means to alias measurement names found within a telegraf database and application layouts found within Chronograf\n", + "description": + "Mappings provide a means to alias measurement names found within a telegraf database and application layouts found within Chronograf\n", "responses": { "200": { "description": "An array of mappings", @@ -1844,7 +1875,8 @@ "collectionFormat": "multi" } ], - "description": "Layouts are a collection of `Cells` that visualize time-series data.\n", + "description": + "Layouts are a collection of `Cells` that visualize time-series data.\n", "responses": { "200": { "description": "An array of layouts", @@ -1867,7 +1899,8 @@ { "name": "layout", "in": "body", - "description": "Defines the layout and queries of the cells within the layout.", + "description": + "Defines the layout and queries of the cells within the layout.", "schema": { "$ref": "#/definitions/Layout" } @@ -1908,8 +1941,10 @@ "required": true } ], - "summary": "Specific pre-configured layout containing cells and queries.", - "description": "layouts will hold information about how to layout the page of graphs.\n", + "summary": + "Specific pre-configured layout containing cells and queries.", + "description": + "layouts will hold information about how to layout the page of graphs.\n", "responses": { "200": { "description": "Returns the specified layout containing `cells`.", @@ -1984,13 +2019,15 @@ ], "responses": { "200": { - "description": "Layout has been replaced and the new layout is 
returned.", + "description": + "Layout has been replaced and the new layout is returned.", "schema": { "$ref": "#/definitions/Layout" } }, "404": { - "description": "Happens when trying to access a non-existent layout.", + "description": + "Happens when trying to access a non-existent layout.", "schema": { "$ref": "#/definitions/Error" } @@ -2043,7 +2080,8 @@ "Location": { "type": "string", "format": "url", - "description": "Location of the newly created dashboard resource." + "description": + "Location of the newly created dashboard resource." } }, "schema": { @@ -2072,10 +2110,12 @@ } ], "summary": "Specific dashboard", - "description": "Dashboards contain visual display information as well as links to queries", + "description": + "Dashboards contain visual display information as well as links to queries", "responses": { "200": { - "description": "Returns the specified dashboard with links to queries.", + "description": + "Returns the specified dashboard with links to queries.", "schema": { "$ref": "#/definitions/Dashboard" } @@ -2147,13 +2187,15 @@ ], "responses": { "200": { - "description": "Dashboard has been replaced and the new dashboard is returned.", + "description": + "Dashboard has been replaced and the new dashboard is returned.", "schema": { "$ref": "#/definitions/Dashboard" } }, "404": { - "description": "Happens when trying to access a non-existent dashboard.", + "description": + "Happens when trying to access a non-existent dashboard.", "schema": { "$ref": "#/definitions/Error" } @@ -2169,7 +2211,8 @@ "patch": { "tags": ["layouts"], "summary": "Update dashboard information.", - "description": "Update either the dashboard name or the dashboard cells", + "description": + "Update either the dashboard name or the dashboard cells", "parameters": [ { "name": "id", @@ -2181,7 +2224,8 @@ { "name": "config", "in": "body", - "description": "dashboard configuration update parameters. 
Must be either name or cells", + "description": + "dashboard configuration update parameters. Must be either name or cells", "schema": { "$ref": "#/definitions/Dashboard" }, @@ -2190,13 +2234,15 @@ ], "responses": { "200": { - "description": "Dashboard has been updated and the new dashboard is returned.", + "description": + "Dashboard has been updated and the new dashboard is returned.", "schema": { "$ref": "#/definitions/Dashboard" } }, "404": { - "description": "Happens when trying to access a non-existent dashboard.", + "description": + "Happens when trying to access a non-existent dashboard.", "schema": { "$ref": "#/definitions/Error" } @@ -2240,7 +2286,8 @@ "shardDuration": "7d", "default": true, "links": { - "self": "/chronograf/v1/ousrces/1/dbs/NOAA_water_database/rps/liquid" + "self": + "/chronograf/v1/ousrces/1/dbs/NOAA_water_database/rps/liquid" } } ], @@ -2337,11 +2384,13 @@ "url": { "type": "string", "format": "url", - "description": "URL for the kapacitor backend (e.g. http://localhost:9092)" + "description": + "URL for the kapacitor backend (e.g. 
http://localhost:9092)" }, "active": { "type": "boolean", - "description": "Indicates whether the kapacitor is the current kapacitor being used for a source" + "description": + "Indicates whether the kapacitor is the current kapacitor being used for a source" }, "links": { "type": "object", @@ -2353,12 +2402,14 @@ }, "proxy": { "type": "string", - "description": "URL location of proxy endpoint for this kapacitor", + "description": + "URL location of proxy endpoint for this kapacitor", "format": "url" }, "rules": { "type": "string", - "description": "URL location of rules endpoint for this kapacitor", + "description": + "URL location of rules endpoint for this kapacitor", "format": "url" } } @@ -2366,7 +2417,8 @@ } }, "KapacitorProxy": { - "description": "Entirely used as the body for the request to the kapacitor backend.", + "description": + "Entirely used as the body for the request to the kapacitor backend.", "type": "object" }, "KapacitorProxyResponse": { @@ -2392,7 +2444,8 @@ "type": "string", "description": "InfluxQL statement to be analyzed", "example": { - "query": "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" + "query": + "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" } } } @@ -2403,7 +2456,8 @@ "query": { "type": "string", "example": { - "query": "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" + "query": + "select max(usage_system) from telegraf.autogen.cpu group by time(10m)" }, "description": "InfluxQL statement to be analyzed" }, @@ -2563,12 +2617,14 @@ }, "KapacitorProperty": { "type": "object", - "description": "Represents a property attached to a node in the kapacitor TICKscript graph", + "description": + "Represents a property attached to a node in the kapacitor TICKscript graph", "required": ["name"], "properties": { "name": { "type": "string", - "description": "Name of the kapacitor property e.g. 
channel for a slack ndoe" + "description": + "Name of the kapacitor property e.g. channel for a slack ndoe" }, "args": { "type": "array", @@ -2624,7 +2680,8 @@ }, "default": { "type": "boolean", - "description": "Indicates whether this retention policy should be the default" + "description": + "Indicates whether this retention policy should be the default" }, "links": { "type": "object", @@ -2677,12 +2734,28 @@ "value": "10" }, "name": "Untitled Rule", - "tickscript": "var db = 'telegraf'\n\nvar rp = 'autogen'\n\nvar measurement = 'cpu'\n\nvar groupBy = []\n\nvar whereFilter = lambda: TRUE\n\nvar period = 10m\n\nvar every = 30s\n\nvar name = 'Untitled Rule'\n\nvar idVar = name + ':{{.Group}}'\n\nvar message = 'too much spam'\n\nvar idTag = 'alertID'\n\nvar levelTag = 'level'\n\nvar messageField = 'message'\n\nvar durationField = 'duration'\n\nvar outputDB = 'chronograf'\n\nvar outputRP = 'autogen'\n\nvar outputMeasurement = 'alerts'\n\nvar triggerType = 'threshold'\n\nvar details = 'muh body'\n\nvar crit = 10\n\nvar data = stream\n |from()\n .database(db)\n .retentionPolicy(rp)\n .measurement(measurement)\n .groupBy(groupBy)\n .where(whereFilter)\n |window()\n .period(period)\n .every(every)\n .align()\n |max('usage_system')\n .as('value')\n\nvar trigger = data\n |alert()\n .crit(lambda: \"value\" > crit)\n .stateChangesOnly()\n .message(message)\n .id(idVar)\n .idTag(idTag)\n .levelTag(levelTag)\n .messageField(messageField)\n .durationField(durationField)\n .details(details)\n .alerta()\n\ntrigger\n |influxDBOut()\n .create()\n .database(outputDB)\n .retentionPolicy(outputRP)\n .measurement(outputMeasurement)\n .tag('alertName', name)\n .tag('triggerType', triggerType)\n\ntrigger\n |httpOut('output')\n", - "status": "enabled", + "tickscript": + "var db = 'telegraf'\n\nvar rp = 'autogen'\n\nvar measurement = 'cpu'\n\nvar groupBy = []\n\nvar whereFilter = lambda: TRUE\n\nvar period = 10m\n\nvar every = 30s\n\nvar name = 'Untitled Rule'\n\nvar idVar = name + 
':{{.Group}}'\n\nvar message = 'too much spam'\n\nvar idTag = 'alertID'\n\nvar levelTag = 'level'\n\nvar messageField = 'message'\n\nvar durationField = 'duration'\n\nvar outputDB = 'chronograf'\n\nvar outputRP = 'autogen'\n\nvar outputMeasurement = 'alerts'\n\nvar triggerType = 'threshold'\n\nvar details = 'muh body'\n\nvar crit = 10\n\nvar data = stream\n |from()\n .database(db)\n .retentionPolicy(rp)\n .measurement(measurement)\n .groupBy(groupBy)\n .where(whereFilter)\n |window()\n .period(period)\n .every(every)\n .align()\n |max('usage_system')\n .as('value')\n\nvar trigger = data\n |alert()\n .crit(lambda: \"value\" > crit)\n .stateChangesOnly()\n .message(message)\n .id(idVar)\n .idTag(idTag)\n .levelTag(levelTag)\n .messageField(messageField)\n .durationField(durationField)\n .details(details)\n .alerta()\n\ntrigger\n |influxDBOut()\n .create()\n .database(outputDB)\n .retentionPolicy(outputRP)\n .measurement(outputMeasurement)\n .tag('alertName', name)\n .tag('triggerType', triggerType)\n\ntrigger\n |httpOut('output')\n", + "type": "stream", + "dbrps": [ + { + "db": "telegraf", + "rp": "autogen" + } + ], + "status": "disabled", + "executing": false, + "error": "", + "created": "2017-05-05T16:16:03.471138388-05:00", + "modified": "2017-05-23T15:57:42.625909746-05:00", + "last-enabled": "2017-05-05T16:16:25.890210217-05:00", "links": { - "self": "/chronograf/v1/sources/5/kapacitors/5/rules/chronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", - "kapacitor": "/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", - "output": "/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38%2Foutput" + "self": + "/chronograf/v1/sources/5/kapacitors/5/rules/chronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", + "kapacitor": + 
"/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38", + "output": + "/chronograf/v1/sources/5/kapacitors/5/proxy?path=%2Fkapacitor%2Fv1%2Ftasks%2Fchronograf-v1-b2b065ea-79bd-4e4f-8c0d-d0ef68477d38%2Foutput" } }, "required": ["query", "every", "trigger"], @@ -2700,11 +2773,13 @@ }, "every": { "type": "string", - "description": "Golang duration string specifying how often the alert condition is checked" + "description": + "Golang duration string specifying how often the alert condition is checked" }, "alerts": { "type": "array", - "description": "Array of alerting services to warn if the alert is triggered", + "description": + "Array of alerting services to warn if the alert is triggered", "items": { "type": "string", "enum": [ @@ -2718,8 +2793,11 @@ "smtp", "email", "exec", + "log", + "pushover", "sensu", "slack", + "smtp", "talk", "telegram", "tcp" @@ -2739,11 +2817,13 @@ }, "details": { "type": "string", - "description": "Template for constructing a detailed HTML message for the alert. (Currently, only used for email/smtp" + "description": + "Template for constructing a detailed HTML message for the alert. 
(Currently, only used for email/smtp" }, "trigger": { "type": "string", - "description": "Trigger defines the alerting structure; deadman alert if no data are received for the specified time range; relative alert if the data change relative to the data in a different time range; threshold alert if the data cross a boundary", + "description": + "Trigger defines the alerting structure; deadman alert if no data are received for the specified time range; relative alert if the data change relative to the data in a different time range; threshold alert if the data cross a boundary", "enum": ["deadman", "relative", "threshold"] }, "values": { @@ -2756,11 +2836,13 @@ "enum": ["% change", "change"] }, "period": { - "description": "Length of time before deadman is alerted (golang duration)", + "description": + "Length of time before deadman is alerted (golang duration)", "type": "string" }, "shift": { - "description": "Amount of time to look into the past to compare to the present (golang duration)", + "description": + "Amount of time to look into the past to compare to the present (golang duration)", "type": "string" }, "operator": { @@ -2778,7 +2860,8 @@ ] }, "value": { - "description": "Value is the boundary value when alert goes critical", + "description": + "Value is the boundary value when alert goes critical", "type": "string" }, "rangeValue": { @@ -2787,15 +2870,57 @@ } } }, + "dbrps": { + "type": "array", + "description": + "List of database retention policy pairs the task is allowed to access.", + "items": { + "$ref": "#/definitions/DBRP" + } + }, "tickscript": { "type": "string", "description": "TICKscript representing this rule" }, "status": { "type": "string", - "description": "Represents if this rule is enabled or disabled in kapacitor", + "description": + "Represents if this rule is enabled or disabled in kapacitor", "enum": ["enabled", "disabled"] }, + "executing": { + "type": "boolean", + "description": "Whether the task is currently executing.", + 
"readOnly": true + }, + "type": { + "type": "string", + "description": + "Represents the task type where stream is data streamed to kapacitor and batch is queried by kapacitor.", + "enum": ["stream", "batch"] + }, + "error": { + "type": "string", + "description": + "Any error encountered when kapacitor executes the task.", + "readOnly": true + }, + "created": { + "type": "string", + "description": "Date the task was first created", + "readOnly": true + }, + "modified": { + "type": "string", + "description": "Date the task was last modified", + "readOnly": true + }, + "last-enabled": { + "type": "string", + "description": "Date the task was last set to status enabled", + "readOnly": true + }, + "links": { "type": "object", "required": ["self", "kapacitor"], @@ -2806,12 +2931,14 @@ "format": "uri" }, "kapacitor": { - "description": "Link pointing to the kapacitor proxy for this rule including the path query parameter.", + "description": + "Link pointing to the kapacitor proxy for this rule including the path query parameter.", "type": "string", "format": "uri" }, "output": { - "description": "Link pointing to the kapacitor httpOut node of the tickscript; includes the path query argument", + "description": + "Link pointing to the kapacitor httpOut node of the tickscript; includes the path query argument", "type": "string", "format": "uri" } @@ -2819,17 +2946,20 @@ } } }, - "Sources": { + "DBRP": { "type": "object", - "required": ["sources"], + "description": "Database retention policy pair", "properties": { - "sources": { - "type": "array", - "items": { - "$ref": "#/definitions/Source" - } + "db": { + "description": "Database name", + "type": "string" + }, + "rp": { + "description": "Retention policy", + "type": "string" } - } + }, + "required": ["db", "rp"] }, "Source": { "type": "object", @@ -2854,7 +2984,8 @@ "properties": { "id": { "type": "string", - "description": "Unique identifier representing a specific data source.", + "description": + "Unique identifier 
representing a specific data source.", "readOnly": true }, "name": { @@ -2877,12 +3008,14 @@ }, "sharedSecret": { "type": "string", - "description": "JWT signing secret for optional Authorization: Bearer to InfluxDB" + "description": + "JWT signing secret for optional Authorization: Bearer to InfluxDB" }, "url": { "type": "string", "format": "url", - "description": "URL for the time series data source backend (e.g. http://localhost:8086)" + "description": + "URL for the time series data source backend (e.g. http://localhost:8086)" }, "metaUrl": { "type": "string", @@ -2891,7 +3024,8 @@ }, "insecureSkipVerify": { "type": "boolean", - "description": "True means any certificate presented by the source is accepted. Typically used for self-signed certs. Probably should only be used for testing." + "description": + "True means any certificate presented by the source is accepted. Typically used for self-signed certs. Probably should only be used for testing." }, "default": { "type": "boolean", @@ -2899,7 +3033,8 @@ }, "telegraf": { "type": "string", - "description": "Database where telegraf information is stored for this source", + "description": + "Database where telegraf information is stored for this source", "default": "telegraf" }, "links": { @@ -2922,27 +3057,32 @@ }, "queries": { "type": "string", - "description": "URL location of the queries endpoint for this source", + "description": + "URL location of the queries endpoint for this source", "format": "url" }, "kapacitors": { "type": "string", - "description": "URL location of the kapacitors endpoint for this source", + "description": + "URL location of the kapacitors endpoint for this source", "format": "url" }, "users": { "type": "string", - "description": "URL location of the users endpoint for this source", + "description": + "URL location of the users endpoint for this source", "format": "url" }, "permissions": { "type": "string", - "description": "URL location of the permissions endpoint for this source", + 
"description": + "URL location of the permissions endpoint for this source", "format": "url" }, "roles": { "type": "string", - "description": "Optional path to the roles endpoint IFF it is supported on this source", + "description": + "Optional path to the roles endpoint IFF it is supported on this source", "format": "url" } } @@ -2980,7 +3120,8 @@ }, "tempVars": { "type": "array", - "description": "Template variables to replace within an InfluxQL query", + "description": + "Template variables to replace within an InfluxQL query", "items": { "$ref": "#/definitions/TemplateVariable" } @@ -2989,7 +3130,8 @@ }, "TemplateVariable": { "type": "object", - "description": "Named variable within an InfluxQL query to be replaced with values", + "description": + "Named variable within an InfluxQL query to be replaced with values", "properties": { "tempVar": { "type": "string", @@ -3006,7 +3148,8 @@ }, "TemplateValue": { "type": "object", - "description": "Value use to replace a template in an InfluxQL query. The type governs the output format", + "description": + "Value use to replace a template in an InfluxQL query. The type governs the output format", "properties": { "value": { "type": "string", @@ -3015,7 +3158,8 @@ "type": { "type": "string", "enum": ["csv", "tagKey", "tagValue", "fieldKey", "timeStamp"], - "description": "The type will change the format of the output value. tagKey/fieldKey are double quoted; tagValue are single quoted; csv and timeStamp are not quoted." + "description": + "The type will change the format of the output value. tagKey/fieldKey are double quoted; tagValue are single quoted; csv and timeStamp are not quoted." 
} } }, @@ -3284,25 +3428,29 @@ } }, "Permissions": { - "description": "Permissions represent the entire set of permissions a User or Role may have", + "description": + "Permissions represent the entire set of permissions a User or Role may have", "type": "array", "items": { "$ref": "#/definitions/Permission" } }, "Permission": { - "description": "Permission is a specific allowance for User or Role bound to a scope of the data source", + "description": + "Permission is a specific allowance for User or Role bound to a scope of the data source", "type": "object", "required": ["scope", "allowed"], "properties": { "scope": { "type": "string", - "description": "Describes if the permission is for all databases or restricted to one database", + "description": + "Describes if the permission is for all databases or restricted to one database", "enum": ["all", "database"] }, "name": { "type": "string", - "description": "If the scope is database this identifies the name of the database" + "description": + "If the scope is database this identifies the name of the database" }, "allowed": { "$ref": "#/definitions/Allowances" @@ -3315,7 +3463,8 @@ } }, "AllPermissions": { - "description": "All possible permissions for this particular datasource. Used as a static list", + "description": + "All possible permissions for this particular datasource. Used as a static list", "type": "object", "properties": { "permissions": { @@ -3339,11 +3488,13 @@ } }, "Allowances": { - "description": "Allowances defines what actions a user can have on a scoped permission", + "description": + "Allowances defines what actions a user can have on a scoped permission", "type": "array", "items": { "type": "string", - "description": "OSS InfluxDB is READ and WRITE. Enterprise is all others", + "description": + "OSS InfluxDB is READ and WRITE. 
Enterprise is all others", "enum": [ "READ", "WRITE", @@ -3387,7 +3538,8 @@ "properties": { "id": { "type": "string", - "description": "ID is an opaque string that uniquely identifies this layout." + "description": + "ID is an opaque string that uniquely identifies this layout." }, "app": { "type": "string", @@ -3395,7 +3547,8 @@ }, "measurement": { "type": "string", - "description": "Measurement is the descriptive name of the time series data." + "description": + "Measurement is the descriptive name of the time series data." }, "cells": { "type": "array", @@ -3423,11 +3576,13 @@ "name": "Docker - Container Network", "queries": [ { - "query": "SELECT derivative(mean(\"tx_bytes\"), 10s) AS \"net_tx_bytes\" FROM \"docker_container_net\"", + "query": + "SELECT derivative(mean(\"tx_bytes\"), 10s) AS \"net_tx_bytes\" FROM \"docker_container_net\"", "groupbys": ["\"container_name\""] }, { - "query": "SELECT derivative(mean(\"rx_bytes\"), 10s) AS \"net_rx_bytes\" FROM \"docker_container_net\"", + "query": + "SELECT derivative(mean(\"rx_bytes\"), 10s) AS \"net_rx_bytes\" FROM \"docker_container_net\"", "groupbys": ["\"container_name\""] } ], @@ -3461,7 +3616,8 @@ "type": "string" }, "name": { - "description": "The application name which will be assigned to the corresponding measurement", + "description": + "The application name which will be assigned to the corresponding measurement", "type": "string" } }, @@ -3523,7 +3679,8 @@ "name": "usage_user", "queries": [ { - "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", + "query": + "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", "label": "%" } ], @@ -3575,7 +3732,8 @@ }, "example": { "label": "# warnings", - "query": "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks", + "query": + "SELECT count(\"check_id\") as \"Number Warning\" FROM consul_health_checks", "wheres": ["\"status\" = 'warning'"], "groupbys": ["\"service_name\""] } @@ -3623,7 +3781,8 @@ "name": "", 
"queries": [ { - "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", + "query": + "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", "label": "%", "queryConfig": { "database": "", @@ -3782,7 +3941,8 @@ "name": "usage_user", "queries": [ { - "query": "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", + "query": + "SELECT mean(\"usage_user\") AS \"usage_user\" FROM \"cpu\"", "db": "telegraf", "label": "%" } @@ -3797,7 +3957,8 @@ "name": "usage_system", "queries": [ { - "query": "SELECT mean(\"usage_system\") AS \"usage_system\" FROM \"cpu\"", + "query": + "SELECT mean(\"usage_system\") AS \"usage_system\" FROM \"cpu\"", "db": "telegraf", "label": "%" } @@ -3819,7 +3980,8 @@ "type": "array", "minItems": 0, "maxItems": 2, - "description": "The extents of an axis in the form [lower, upper]. Clients determine whether bounds are to be inclusive or exclusive of their limits", + "description": + "The extents of an axis in the form [lower, upper]. Clients determine whether bounds are to be inclusive or exclusive of their limits", "items": { "type": "integer", "format": "int64" @@ -3830,19 +3992,23 @@ "type": "string" }, "prefix": { - "description": "Prefix represents a label prefix for formatting axis values.", + "description": + "Prefix represents a label prefix for formatting axis values.", "type": "string" }, "suffix": { - "description": "Suffix represents a label suffix for formatting axis values.", + "description": + "Suffix represents a label suffix for formatting axis values.", "type": "string" }, "base": { - "description": "Base represents the radix for formatting axis values.", + "description": + "Base represents the radix for formatting axis values.", "type": "string" }, "scale": { - "description": "Scale is the axis formatting scale. Supported: \"log\", \"linear\"", + "description": + "Scale is the axis formatting scale. 
Supported: \"log\", \"linear\"", "type": "string" } } @@ -3876,16 +4042,19 @@ "format": "url" }, "external": { - "description": "external links provided to client, ex. status feed URL", + "description": + "external links provided to client, ex. status feed URL", "type": "object", "properties": { "statusFeed": { - "description": "link to a JSON Feed for the News Feed on client's Status Page", + "description": + "link to a JSON Feed for the News Feed on client's Status Page", "type": "string", "format": "url" }, "custom": { - "description": "a collection of custom links set by the user to be rendered in the client User menu", + "description": + "a collection of custom links set by the user to be rendered in the client User menu", "type": "array", "items": { "type": "object", diff --git a/ui/yarn.lock b/ui/yarn.lock index 7b168719d2..af7e628795 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -2749,11 +2749,11 @@ escape-html@~1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" -escape-string-regexp@1.0.2, escape-string-regexp@^1.0.2: +escape-string-regexp@1.0.2, escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1" -escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.5: +escape-string-regexp@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" @@ -4512,17 +4512,17 @@ lodash.words@^3.0.0: dependencies: lodash._root "^3.0.0" -lodash@4.x.x, lodash@^4.0.0, lodash@^4.0.1, lodash@^4.1.0, lodash@^4.16.4, lodash@^4.17.2, lodash@^4.2.0, lodash@^4.2.1, lodash@^4.3.0, lodash@^4.5.0, lodash@^4.5.1: - version "4.17.3" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.3.tgz#557ed7d2a9438cac5fd5a43043ca60cb455e01f7" 
+lodash@4.x.x, lodash@^4.0.1, lodash@^4.1.0, lodash@^4.16.4, lodash@^4.17.4, lodash@^4.5.0: + version "4.17.4" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae" lodash@^3.8.0: version "3.10.1" resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6" -lodash@^4.17.4: - version "4.17.4" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae" +lodash@^4.0.0, lodash@^4.17.2, lodash@^4.2.0, lodash@^4.2.1, lodash@^4.3.0, lodash@^4.5.1: + version "4.17.3" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.3.tgz#557ed7d2a9438cac5fd5a43043ca60cb455e01f7" lodash@~4.16.4: version "4.16.6"