Merge branch 'master' into chore/merge-master

pull/17807/head
jlapacik 2020-04-20 13:59:21 -07:00
commit 335968a552
62 changed files with 6553 additions and 2715 deletions

View File

@ -24,6 +24,7 @@
### Bug Fixes
1. [17769](https://github.com/influxdata/influxdb/pull/17769): Fix retention policy after bucket is migrated
1. [17612](https://github.com/influxdata/influxdb/pull/17612): Fix card size and layout jank in dashboards index view
1. [17651](https://github.com/influxdata/influxdb/pull/17651): Fix check graph font and lines defaulting to black causing graph to be unreadable
1. [17660](https://github.com/influxdata/influxdb/pull/17660): Fix text wrapping display issue and popover sizing bug when adding labels to a resource

View File

@ -1080,7 +1080,7 @@ func (b *cmdPkgBuilder) printPkgSummary(sum pkger.Summary) error {
v.Description,
v.Every,
v.Offset,
v.EndpointName,
v.EndpointPkgName,
v.EndpointID.String(),
v.EndpointType,
}

View File

@ -679,7 +679,7 @@ spec:
assert.NotZero(t, rule.ID)
assert.Equal(t, "rule_0", rule.Name)
assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID)
assert.Equal(t, "http_none_auth_notification_endpoint", rule.EndpointName)
assert.Equal(t, "http_none_auth_notification_endpoint", rule.EndpointPkgName)
assert.Equalf(t, "http", rule.EndpointType, "rule: %+v", rule)
require.Len(t, sum1.Tasks, 1)
@ -801,7 +801,7 @@ spec:
rule := sum.NotificationRules[0]
assert.Equal(t, "rule_0", rule.Name)
assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID)
assert.NotEmpty(t, rule.EndpointName)
assert.NotEmpty(t, rule.EndpointPkgName)
require.Len(t, sum.Tasks, 1)
task := sum.Tasks[0]
@ -1107,7 +1107,7 @@ spec:
newRule := newSum.NotificationRules[0]
assert.Equal(t, "new rule name", newRule.Name)
assert.Zero(t, newRule.EndpointID)
assert.NotEmpty(t, newRule.EndpointName)
assert.NotEmpty(t, newRule.EndpointPkgName)
hasLabelAssociations(t, newRule.LabelAssociations, 1, "label_1")
require.Len(t, newSum.Tasks, 1)
@ -1403,7 +1403,7 @@ spec:
assert.Equal(t, "endpoint_threeve", sum.NotificationEndpoints[0].NotificationEndpoint.GetName())
assert.Equal(t, "label_threeve", sum.Labels[0].Name)
assert.Equal(t, "rule_threeve", sum.NotificationRules[0].Name)
assert.Equal(t, "endpoint_threeve", sum.NotificationRules[0].EndpointName)
assert.Equal(t, "endpoint_threeve", sum.NotificationRules[0].EndpointPkgName)
assert.Equal(t, "telegraf_threeve", sum.TelegrafConfigs[0].TelegrafConfig.Name)
assert.Equal(t, "task_threeve", sum.Tasks[0].Name)
assert.Equal(t, "var_threeve", sum.Variables[0].Name)

2
go.mod
View File

@ -29,7 +29,7 @@ require (
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect
github.com/go-chi/chi v4.1.0+incompatible
github.com/gogo/protobuf v1.2.1
github.com/gogo/protobuf v1.3.1
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021
github.com/golang/protobuf v1.3.2
github.com/golang/snappy v0.0.1

4
go.sum
View File

@ -137,6 +137,8 @@ github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk=
github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
@ -281,6 +283,7 @@ github.com/kamilsk/retry v0.0.0-20181229152359-495c1d672c93/go.mod h1:vW4uuVWZOG
github.com/kevinburke/go-bindata v3.11.0+incompatible h1:RcC+GJNmrBHbGaOpQ9MBD8z22rdzlIm0esDRDkyxd4s=
github.com/kevinburke/go-bindata v3.11.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@ -587,6 +590,7 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=

View File

@ -7432,7 +7432,7 @@ components:
type: string
description:
type: string
endpointName:
endpointPkgName:
type: string
endpointID:
type: string
@ -7677,8 +7677,8 @@ components:
items:
type: object
properties:
remove:
type: boolean
stateStatus:
type: string
id:
type: string
pkgName:

View File

@ -3,16 +3,12 @@ package pkger
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"time"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/notification"
icheck "github.com/influxdata/influxdb/v2/notification/check"
"github.com/influxdata/influxdb/v2/notification/endpoint"
"github.com/influxdata/influxdb/v2/notification/rule"
)
// Package kind types.
@ -363,78 +359,6 @@ type (
}
)
func newDiffNotificationRule(r *notificationRule, iEndpoint influxdb.NotificationEndpoint) DiffNotificationRule {
sum := DiffNotificationRule{
DiffIdentifier: DiffIdentifier{
ID: SafeID(r.ID()),
Remove: r.shouldRemove,
PkgName: r.PkgName(),
},
New: DiffNotificationRuleValues{
Name: r.Name(),
Description: r.description,
EndpointName: r.endpointName.String(),
Every: r.every.String(),
Offset: r.offset.String(),
MessageTemplate: r.msgTemplate,
StatusRules: toSummaryStatusRules(r.statusRules),
TagRules: toSummaryTagRules(r.tagRules),
},
}
if iEndpoint != nil {
sum.New.EndpointID = SafeID(iEndpoint.GetID())
sum.New.EndpointType = iEndpoint.Type()
}
if r.existing == nil {
return sum
}
sum.Old = &DiffNotificationRuleValues{
Name: r.existing.rule.GetName(),
Description: r.existing.rule.GetDescription(),
EndpointName: r.existing.endpointName,
EndpointID: SafeID(r.existing.rule.GetEndpointID()),
EndpointType: r.existing.endpointType,
}
assignBase := func(b rule.Base) {
if b.Every != nil {
sum.Old.Every = b.Every.TimeDuration().String()
}
if b.Offset != nil {
sum.Old.Offset = b.Offset.TimeDuration().String()
}
for _, tr := range b.TagRules {
sum.Old.TagRules = append(sum.Old.TagRules, SummaryTagRule{
Key: tr.Key,
Value: tr.Value,
Operator: tr.Operator.String(),
})
}
for _, sr := range b.StatusRules {
sRule := SummaryStatusRule{CurrentLevel: sr.CurrentLevel.String()}
if sr.PreviousLevel != nil {
sRule.PreviousLevel = sr.PreviousLevel.String()
}
sum.Old.StatusRules = append(sum.Old.StatusRules, sRule)
}
}
switch p := r.existing.rule.(type) {
case *rule.HTTP:
assignBase(p.Base)
case *rule.Slack:
assignBase(p.Base)
sum.Old.MessageTemplate = p.MessageTemplate
case *rule.PagerDuty:
assignBase(p.Base)
sum.Old.MessageTemplate = p.MessageTemplate
}
return sum
}
type (
// DiffTask is a diff of an individual task.
DiffTask struct {
@ -636,10 +560,10 @@ type (
Name string `json:"name"`
Description string `json:"description"`
// These 3 fields represent the relationship of the rule to the endpoint.
EndpointID SafeID `json:"endpointID"`
EndpointName string `json:"endpointName"`
EndpointType string `json:"endpointType"`
// These fields represent the relationship of the rule to the endpoint.
EndpointID SafeID `json:"endpointID"`
EndpointPkgName string `json:"endpointPkgName"`
EndpointType string `json:"endpointType"`
Every string `json:"every"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
@ -719,344 +643,3 @@ type SummaryVariable struct {
Arguments *influxdb.VariableArguments `json:"arguments"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
const (
fieldNotificationRuleChannel = "channel"
fieldNotificationRuleCurrentLevel = "currentLevel"
fieldNotificationRuleEndpointName = "endpointName"
fieldNotificationRuleMessageTemplate = "messageTemplate"
fieldNotificationRulePreviousLevel = "previousLevel"
fieldNotificationRuleStatusRules = "statusRules"
fieldNotificationRuleTagRules = "tagRules"
)
type notificationRule struct {
identity
id influxdb.ID
orgID influxdb.ID
channel string
description string
every time.Duration
msgTemplate string
offset time.Duration
status string
statusRules []struct{ curLvl, prevLvl string }
tagRules []struct{ k, v, op string }
endpointID influxdb.ID
endpointName *references
endpointType string
existing *existingRule
labels sortedLabels
}
type existingRule struct {
rule influxdb.NotificationRule
endpointName string
endpointType string
}
func (r *notificationRule) Exists() bool {
return r.existing != nil
}
func (r *notificationRule) ID() influxdb.ID {
if r.existing != nil {
return r.existing.rule.GetID()
}
return r.id
}
func (r *notificationRule) Labels() []*label {
return r.labels
}
func (r *notificationRule) ResourceType() influxdb.ResourceType {
return KindNotificationRule.ResourceType()
}
func (r *notificationRule) Status() influxdb.Status {
if r.status == "" {
return influxdb.Active
}
return influxdb.Status(r.status)
}
func (r *notificationRule) summarize() SummaryNotificationRule {
return SummaryNotificationRule{
ID: SafeID(r.ID()),
PkgName: r.PkgName(),
Name: r.Name(),
EndpointID: SafeID(r.endpointID),
EndpointName: r.endpointName.String(),
EndpointType: r.endpointType,
Description: r.description,
Every: r.every.String(),
LabelAssociations: toSummaryLabels(r.labels...),
Offset: r.offset.String(),
MessageTemplate: r.msgTemplate,
Status: r.Status(),
StatusRules: toSummaryStatusRules(r.statusRules),
TagRules: toSummaryTagRules(r.tagRules),
}
}
func (r *notificationRule) toInfluxRule() influxdb.NotificationRule {
base := rule.Base{
ID: r.ID(),
Name: r.Name(),
Description: r.description,
EndpointID: r.endpointID,
OrgID: r.orgID,
Every: toNotificationDuration(r.every),
Offset: toNotificationDuration(r.offset),
}
for _, sr := range r.statusRules {
var prevLvl *notification.CheckLevel
if lvl := notification.ParseCheckLevel(sr.prevLvl); lvl != notification.Unknown {
prevLvl = &lvl
}
base.StatusRules = append(base.StatusRules, notification.StatusRule{
CurrentLevel: notification.ParseCheckLevel(sr.curLvl),
PreviousLevel: prevLvl,
})
}
for _, tr := range r.tagRules {
op, _ := influxdb.ToOperator(tr.op)
base.TagRules = append(base.TagRules, notification.TagRule{
Tag: influxdb.Tag{
Key: tr.k,
Value: tr.v,
},
Operator: op,
})
}
switch r.endpointType {
case "http":
return &rule.HTTP{Base: base}
case "pagerduty":
return &rule.PagerDuty{
Base: base,
MessageTemplate: r.msgTemplate,
}
case "slack":
return &rule.Slack{
Base: base,
Channel: r.channel,
MessageTemplate: r.msgTemplate,
}
}
return nil
}
func (r *notificationRule) valid() []validationErr {
var vErrs []validationErr
if !r.endpointName.hasValue() {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleEndpointName,
Msg: "must be provided",
})
}
if r.every == 0 {
vErrs = append(vErrs, validationErr{
Field: fieldEvery,
Msg: "must be provided",
})
}
if status := r.Status(); status != influxdb.Active && status != influxdb.Inactive {
vErrs = append(vErrs, validationErr{
Field: fieldStatus,
Msg: fmt.Sprintf("must be 1 in [active, inactive]; got=%q", r.status),
})
}
if len(r.statusRules) == 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleStatusRules,
Msg: "must provide at least 1",
})
}
var sRuleErrs []validationErr
for i, sRule := range r.statusRules {
if notification.ParseCheckLevel(sRule.curLvl) == notification.Unknown {
sRuleErrs = append(sRuleErrs, validationErr{
Field: fieldNotificationRuleCurrentLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.curLvl),
Index: intPtr(i),
})
}
if sRule.prevLvl != "" && notification.ParseCheckLevel(sRule.prevLvl) == notification.Unknown {
sRuleErrs = append(sRuleErrs, validationErr{
Field: fieldNotificationRulePreviousLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.prevLvl),
Index: intPtr(i),
})
}
}
if len(sRuleErrs) > 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleStatusRules,
Nested: sRuleErrs,
})
}
var tagErrs []validationErr
for i, tRule := range r.tagRules {
if _, ok := influxdb.ToOperator(tRule.op); !ok {
tagErrs = append(tagErrs, validationErr{
Field: fieldOperator,
Msg: fmt.Sprintf("must be 1 in [equal]; got=%q", tRule.op),
Index: intPtr(i),
})
}
}
if len(tagErrs) > 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleTagRules,
Nested: tagErrs,
})
}
if len(vErrs) > 0 {
return []validationErr{
objectValidationErr(fieldSpec, vErrs...),
}
}
return nil
}
func toSummaryStatusRules(statusRules []struct{ curLvl, prevLvl string }) []SummaryStatusRule {
out := make([]SummaryStatusRule, 0, len(statusRules))
for _, sRule := range statusRules {
out = append(out, SummaryStatusRule{
CurrentLevel: sRule.curLvl,
PreviousLevel: sRule.prevLvl,
})
}
sort.Slice(out, func(i, j int) bool {
si, sj := out[i], out[j]
if si.CurrentLevel == sj.CurrentLevel {
return si.PreviousLevel < sj.PreviousLevel
}
return si.CurrentLevel < sj.CurrentLevel
})
return out
}
func toSummaryTagRules(tagRules []struct{ k, v, op string }) []SummaryTagRule {
out := make([]SummaryTagRule, 0, len(tagRules))
for _, tRule := range tagRules {
out = append(out, SummaryTagRule{
Key: tRule.k,
Value: tRule.v,
Operator: tRule.op,
})
}
sort.Slice(out, func(i, j int) bool {
ti, tj := out[i], out[j]
if ti.Key == tj.Key && ti.Value == tj.Value {
return ti.Operator < tj.Operator
}
if ti.Key == tj.Key {
return ti.Value < tj.Value
}
return ti.Key < tj.Key
})
return out
}
type mapperNotificationRules []*notificationRule
func (r mapperNotificationRules) Association(i int) labelAssociater {
return r[i]
}
func (r mapperNotificationRules) Len() int {
return len(r)
}
const (
fieldReferencesEnv = "envRef"
fieldReferencesSecret = "secretRef"
)
type references struct {
val interface{}
EnvRef string
Secret string
}
func (r *references) hasValue() bool {
return r.EnvRef != "" || r.Secret != "" || r.val != nil
}
func (r *references) String() string {
if r == nil {
return ""
}
if v := r.StringVal(); v != "" {
return v
}
if r.EnvRef != "" {
return "$" + r.EnvRef
}
return ""
}
func (r *references) StringVal() string {
if r.val != nil {
s, _ := r.val.(string)
return s
}
return ""
}
func (r *references) SecretField() influxdb.SecretField {
if secret := r.Secret; secret != "" {
return influxdb.SecretField{Key: secret}
}
if str := r.StringVal(); str != "" {
return influxdb.SecretField{Value: &str}
}
return influxdb.SecretField{}
}
func isValidName(name string, minLength int) (validationErr, bool) {
if len(name) >= minLength {
return validationErr{}, true
}
return validationErr{
Field: fieldName,
Msg: fmt.Sprintf("must be a string of at least %d chars in length", minLength),
}, false
}
func toNotificationDuration(dur time.Duration) *notification.Duration {
d, _ := notification.FromTimeDuration(dur)
return &d
}
func durToStr(dur time.Duration) string {
if dur == 0 {
return ""
}
return dur.String()
}
func flt64Ptr(f float64) *float64 {
if f != 0 {
return &f
}
return nil
}
func intPtr(i int) *int {
return &i
}

View File

@ -385,58 +385,36 @@ func (p *Pkg) applySecrets(secrets map[string]string) {
// Contains identifies if a pkg contains a given object identified
// by its kind and metadata.Name (PkgName) field.
func (p *Pkg) Contains(k Kind, pkgName string) bool {
_, ok := p.getObjectIDSetter(k, pkgName)
return ok
}
// setObjectID sets the id for the resource graphed from the object the key identifies.
func (p *Pkg) setObjectID(k Kind, pkgName string, id influxdb.ID) {
idSetFn, ok := p.getObjectIDSetter(k, pkgName)
if !ok {
return
}
idSetFn(id)
}
// setObjectID sets the id for the resource graphed from the object the key identifies.
// The pkgName and kind are used as the unique identifier, when calling this it will
// overwrite any existing value if one exists. If desired, check for the value by using
// the Contains method.
func (p *Pkg) addObjectForRemoval(k Kind, pkgName string, id influxdb.ID) {
newIdentity := identity{
name: &references{val: pkgName},
shouldRemove: true,
}
switch k {
case KindBucket:
_, ok := p.mBuckets[pkgName]
return ok
case KindCheck, KindCheckDeadman, KindCheckThreshold:
_, ok := p.mChecks[pkgName]
return ok
case KindLabel:
p.mLabels[pkgName] = &label{
identity: newIdentity,
id: id,
}
_, ok := p.mLabels[pkgName]
return ok
case KindNotificationEndpoint,
KindNotificationEndpointHTTP,
KindNotificationEndpointPagerDuty,
KindNotificationEndpointSlack:
_, ok := p.mNotificationEndpoints[pkgName]
return ok
case KindNotificationRule:
p.mNotificationRules[pkgName] = &notificationRule{
identity: newIdentity,
id: id,
}
}
}
func (p *Pkg) getObjectIDSetter(k Kind, pkgName string) (func(influxdb.ID), bool) {
switch k {
case KindLabel:
l, ok := p.mLabels[pkgName]
return func(id influxdb.ID) {
l.id = id
}, ok
case KindNotificationRule:
r, ok := p.mNotificationRules[pkgName]
return func(id influxdb.ID) {
r.id = id
}, ok
default:
return nil, false
_, ok := p.mNotificationRules[pkgName]
return ok
case KindTask:
_, ok := p.mTasks[pkgName]
return ok
case KindTelegraf:
_, ok := p.mTelegrafs[pkgName]
return ok
case KindVariable:
_, ok := p.mVariables[pkgName]
return ok
}
return false
}
// Combine combines pkgs together. Is useful when you want to take multiple disparate pkgs
@ -996,6 +974,8 @@ func (p *Pkg) graphNotificationRules() *parseErr {
})
}
rule.associatedEndpoint = p.mNotificationEndpoints[rule.endpointName.String()]
failures := p.parseNestedLabels(o.Spec, func(l *label) error {
rule.labels = append(rule.labels, l)
p.mLabels[l.PkgName()].setMapping(rule, false)

View File

@ -4,6 +4,7 @@ import (
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"time"
@ -12,6 +13,7 @@ import (
"github.com/influxdata/influxdb/v2/notification"
icheck "github.com/influxdata/influxdb/v2/notification/check"
"github.com/influxdata/influxdb/v2/notification/endpoint"
"github.com/influxdata/influxdb/v2/notification/rule"
)
type identity struct {
@ -307,6 +309,83 @@ func (c *check) valid() []validationErr {
return nil
}
type thresholdType string
const (
thresholdTypeGreater thresholdType = "greater"
thresholdTypeLesser thresholdType = "lesser"
thresholdTypeInsideRange thresholdType = "inside_range"
thresholdTypeOutsideRange thresholdType = "outside_range"
)
var thresholdTypes = map[thresholdType]bool{
thresholdTypeGreater: true,
thresholdTypeLesser: true,
thresholdTypeInsideRange: true,
thresholdTypeOutsideRange: true,
}
type threshold struct {
threshType thresholdType
allVals bool
level string
val float64
min, max float64
}
func (t threshold) valid() []validationErr {
var vErrs []validationErr
if notification.ParseCheckLevel(t.level) == notification.Unknown {
vErrs = append(vErrs, validationErr{
Field: fieldLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", t.level),
})
}
if !thresholdTypes[t.threshType] {
vErrs = append(vErrs, validationErr{
Field: fieldType,
Msg: fmt.Sprintf("must be 1 in [Lesser, Greater, Inside_Range, Outside_Range]; got=%q", t.threshType),
})
}
if t.min > t.max {
vErrs = append(vErrs, validationErr{
Field: fieldMin,
Msg: "min must be < max",
})
}
return vErrs
}
func toInfluxThresholds(thresholds ...threshold) []icheck.ThresholdConfig {
var iThresh []icheck.ThresholdConfig
for _, th := range thresholds {
base := icheck.ThresholdConfigBase{
AllValues: th.allVals,
Level: notification.ParseCheckLevel(th.level),
}
switch th.threshType {
case thresholdTypeGreater:
iThresh = append(iThresh, icheck.Greater{
ThresholdConfigBase: base,
Value: th.val,
})
case thresholdTypeLesser:
iThresh = append(iThresh, icheck.Lesser{
ThresholdConfigBase: base,
Value: th.val,
})
case thresholdTypeInsideRange, thresholdTypeOutsideRange:
iThresh = append(iThresh, icheck.Range{
ThresholdConfigBase: base,
Max: th.max,
Min: th.min,
Within: th.threshType == thresholdTypeInsideRange,
})
}
}
return iThresh
}
// chartKind identifies what kind of chart is eluded too. Each
// chart kind has their own requirements for what constitutes
// a chart.
@ -996,13 +1075,6 @@ type assocMapVal struct {
v interface{}
}
func (l assocMapVal) ID() influxdb.ID {
if t, ok := l.v.(labelAssociater); ok {
return t.ID()
}
return 0
}
func (l assocMapVal) PkgName() string {
t, ok := l.v.(interface{ PkgName() string })
if ok {
@ -1055,17 +1127,11 @@ const (
const labelNameMinLength = 2
type label struct {
id influxdb.ID
identity
Color string
Description string
associationMapping
// exists provides context for a resource that already
// exists in the platform. If a resource already exists(exists=true)
// then the ID should be populated.
existing *influxdb.Label
}
func (l *label) summarize() SummaryLabel {
@ -1093,11 +1159,9 @@ func (l *label) mappingSummary() []SummaryLabelMapping {
mappings = append(mappings, SummaryLabelMapping{
exists: v.exists,
Status: status,
ResourceID: SafeID(v.ID()),
ResourcePkgName: v.PkgName(),
ResourceName: resource.name,
ResourceType: resource.resType,
LabelID: SafeID(l.ID()),
LabelPkgName: l.PkgName(),
LabelName: l.Name(),
})
@ -1107,16 +1171,6 @@ func (l *label) mappingSummary() []SummaryLabelMapping {
return mappings
}
func (l *label) ID() influxdb.ID {
if l.id != 0 {
return l.id
}
if l.existing != nil {
return l.existing.ID
}
return 0
}
func (l *label) valid() []validationErr {
var vErrs []validationErr
if err, ok := isValidName(l.Name(), labelNameMinLength); !ok {
@ -1152,83 +1206,6 @@ func (s sortedLabels) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
type thresholdType string
const (
thresholdTypeGreater thresholdType = "greater"
thresholdTypeLesser thresholdType = "lesser"
thresholdTypeInsideRange thresholdType = "inside_range"
thresholdTypeOutsideRange thresholdType = "outside_range"
)
var thresholdTypes = map[thresholdType]bool{
thresholdTypeGreater: true,
thresholdTypeLesser: true,
thresholdTypeInsideRange: true,
thresholdTypeOutsideRange: true,
}
type threshold struct {
threshType thresholdType
allVals bool
level string
val float64
min, max float64
}
func (t threshold) valid() []validationErr {
var vErrs []validationErr
if notification.ParseCheckLevel(t.level) == notification.Unknown {
vErrs = append(vErrs, validationErr{
Field: fieldLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", t.level),
})
}
if !thresholdTypes[t.threshType] {
vErrs = append(vErrs, validationErr{
Field: fieldType,
Msg: fmt.Sprintf("must be 1 in [Lesser, Greater, Inside_Range, Outside_Range]; got=%q", t.threshType),
})
}
if t.min > t.max {
vErrs = append(vErrs, validationErr{
Field: fieldMin,
Msg: "min must be < max",
})
}
return vErrs
}
func toInfluxThresholds(thresholds ...threshold) []icheck.ThresholdConfig {
var iThresh []icheck.ThresholdConfig
for _, th := range thresholds {
base := icheck.ThresholdConfigBase{
AllValues: th.allVals,
Level: notification.ParseCheckLevel(th.level),
}
switch th.threshType {
case thresholdTypeGreater:
iThresh = append(iThresh, icheck.Greater{
ThresholdConfigBase: base,
Value: th.val,
})
case thresholdTypeLesser:
iThresh = append(iThresh, icheck.Lesser{
ThresholdConfigBase: base,
Value: th.val,
})
case thresholdTypeInsideRange, thresholdTypeOutsideRange:
iThresh = append(iThresh, icheck.Range{
ThresholdConfigBase: base,
Max: th.max,
Min: th.min,
Within: th.threshType == thresholdTypeInsideRange,
})
}
}
return iThresh
}
type notificationEndpointKind int
const (
@ -1237,6 +1214,17 @@ const (
notificationKindSlack
)
func (n notificationEndpointKind) String() string {
if n > 0 && n < 4 {
return [...]string{
endpoint.HTTPType,
endpoint.PagerDutyType,
endpoint.SlackType,
}[n-1]
}
return ""
}
const (
notificationHTTPAuthTypeBasic = "basic"
notificationHTTPAuthTypeBearer = "bearer"
@ -1423,6 +1411,242 @@ func (n *notificationEndpoint) valid() []validationErr {
return nil
}
const (
fieldNotificationRuleChannel = "channel"
fieldNotificationRuleCurrentLevel = "currentLevel"
fieldNotificationRuleEndpointName = "endpointName"
fieldNotificationRuleMessageTemplate = "messageTemplate"
fieldNotificationRulePreviousLevel = "previousLevel"
fieldNotificationRuleStatusRules = "statusRules"
fieldNotificationRuleTagRules = "tagRules"
)
type notificationRule struct {
identity
channel string
description string
every time.Duration
msgTemplate string
offset time.Duration
status string
statusRules []struct{ curLvl, prevLvl string }
tagRules []struct{ k, v, op string }
associatedEndpoint *notificationEndpoint
endpointName *references
labels sortedLabels
}
func (r *notificationRule) Labels() []*label {
return r.labels
}
func (r *notificationRule) ResourceType() influxdb.ResourceType {
return KindNotificationRule.ResourceType()
}
func (r *notificationRule) Status() influxdb.Status {
if r.status == "" {
return influxdb.Active
}
return influxdb.Status(r.status)
}
func (r *notificationRule) summarize() SummaryNotificationRule {
var endpointPkgName, endpointType string
if r.associatedEndpoint != nil {
endpointPkgName = r.associatedEndpoint.PkgName()
endpointType = r.associatedEndpoint.kind.String()
}
return SummaryNotificationRule{
PkgName: r.PkgName(),
Name: r.Name(),
EndpointPkgName: endpointPkgName,
EndpointType: endpointType,
Description: r.description,
Every: r.every.String(),
LabelAssociations: toSummaryLabels(r.labels...),
Offset: r.offset.String(),
MessageTemplate: r.msgTemplate,
Status: r.Status(),
StatusRules: toSummaryStatusRules(r.statusRules),
TagRules: toSummaryTagRules(r.tagRules),
}
}
func (r *notificationRule) toInfluxRule() influxdb.NotificationRule {
base := rule.Base{
Name: r.Name(),
Description: r.description,
Every: toNotificationDuration(r.every),
Offset: toNotificationDuration(r.offset),
}
for _, sr := range r.statusRules {
var prevLvl *notification.CheckLevel
if lvl := notification.ParseCheckLevel(sr.prevLvl); lvl != notification.Unknown {
prevLvl = &lvl
}
base.StatusRules = append(base.StatusRules, notification.StatusRule{
CurrentLevel: notification.ParseCheckLevel(sr.curLvl),
PreviousLevel: prevLvl,
})
}
for _, tr := range r.tagRules {
op, _ := influxdb.ToOperator(tr.op)
base.TagRules = append(base.TagRules, notification.TagRule{
Tag: influxdb.Tag{
Key: tr.k,
Value: tr.v,
},
Operator: op,
})
}
switch r.associatedEndpoint.kind {
case notificationKindHTTP:
return &rule.HTTP{Base: base}
case notificationKindPagerDuty:
return &rule.PagerDuty{
Base: base,
MessageTemplate: r.msgTemplate,
}
case notificationKindSlack:
return &rule.Slack{
Base: base,
Channel: r.channel,
MessageTemplate: r.msgTemplate,
}
}
return nil
}
func (r *notificationRule) valid() []validationErr {
var vErrs []validationErr
if !r.endpointName.hasValue() {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleEndpointName,
Msg: "must be provided",
})
} else if r.associatedEndpoint == nil {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleEndpointName,
Msg: fmt.Sprintf("notification endpoint %q does not exist in pkg", r.endpointName.String()),
})
}
if r.every == 0 {
vErrs = append(vErrs, validationErr{
Field: fieldEvery,
Msg: "must be provided",
})
}
if status := r.Status(); status != influxdb.Active && status != influxdb.Inactive {
vErrs = append(vErrs, validationErr{
Field: fieldStatus,
Msg: fmt.Sprintf("must be 1 in [active, inactive]; got=%q", r.status),
})
}
if len(r.statusRules) == 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleStatusRules,
Msg: "must provide at least 1",
})
}
var sRuleErrs []validationErr
for i, sRule := range r.statusRules {
if notification.ParseCheckLevel(sRule.curLvl) == notification.Unknown {
sRuleErrs = append(sRuleErrs, validationErr{
Field: fieldNotificationRuleCurrentLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.curLvl),
Index: intPtr(i),
})
}
if sRule.prevLvl != "" && notification.ParseCheckLevel(sRule.prevLvl) == notification.Unknown {
sRuleErrs = append(sRuleErrs, validationErr{
Field: fieldNotificationRulePreviousLevel,
Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.prevLvl),
Index: intPtr(i),
})
}
}
if len(sRuleErrs) > 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleStatusRules,
Nested: sRuleErrs,
})
}
var tagErrs []validationErr
for i, tRule := range r.tagRules {
if _, ok := influxdb.ToOperator(tRule.op); !ok {
tagErrs = append(tagErrs, validationErr{
Field: fieldOperator,
Msg: fmt.Sprintf("must be 1 in [equal]; got=%q", tRule.op),
Index: intPtr(i),
})
}
}
if len(tagErrs) > 0 {
vErrs = append(vErrs, validationErr{
Field: fieldNotificationRuleTagRules,
Nested: tagErrs,
})
}
if len(vErrs) > 0 {
return []validationErr{
objectValidationErr(fieldSpec, vErrs...),
}
}
return nil
}
func toSummaryStatusRules(statusRules []struct{ curLvl, prevLvl string }) []SummaryStatusRule {
out := make([]SummaryStatusRule, 0, len(statusRules))
for _, sRule := range statusRules {
out = append(out, SummaryStatusRule{
CurrentLevel: sRule.curLvl,
PreviousLevel: sRule.prevLvl,
})
}
sort.Slice(out, func(i, j int) bool {
si, sj := out[i], out[j]
if si.CurrentLevel == sj.CurrentLevel {
return si.PreviousLevel < sj.PreviousLevel
}
return si.CurrentLevel < sj.CurrentLevel
})
return out
}
func toSummaryTagRules(tagRules []struct{ k, v, op string }) []SummaryTagRule {
out := make([]SummaryTagRule, 0, len(tagRules))
for _, tRule := range tagRules {
out = append(out, SummaryTagRule{
Key: tRule.k,
Value: tRule.v,
Operator: tRule.op,
})
}
sort.Slice(out, func(i, j int) bool {
ti, tj := out[i], out[j]
if ti.Key == tj.Key && ti.Value == tj.Value {
return ti.Operator < tj.Operator
}
if ti.Key == tj.Key {
return ti.Value < tj.Value
}
return ti.Key < tj.Key
})
return out
}
const (
fieldTaskCron = "cron"
)
@ -1724,3 +1948,82 @@ func (v *variable) valid() []validationErr {
return nil
}
const (
fieldReferencesEnv = "envRef"
fieldReferencesSecret = "secretRef"
)
type references struct {
val interface{}
EnvRef string
Secret string
}
func (r *references) hasValue() bool {
return r.EnvRef != "" || r.Secret != "" || r.val != nil
}
func (r *references) String() string {
if r == nil {
return ""
}
if v := r.StringVal(); v != "" {
return v
}
if r.EnvRef != "" {
return "$" + r.EnvRef
}
return ""
}
func (r *references) StringVal() string {
if r.val != nil {
s, _ := r.val.(string)
return s
}
return ""
}
func (r *references) SecretField() influxdb.SecretField {
if secret := r.Secret; secret != "" {
return influxdb.SecretField{Key: secret}
}
if str := r.StringVal(); str != "" {
return influxdb.SecretField{Value: &str}
}
return influxdb.SecretField{}
}
func isValidName(name string, minLength int) (validationErr, bool) {
if len(name) >= minLength {
return validationErr{}, true
}
return validationErr{
Field: fieldName,
Msg: fmt.Sprintf("must be a string of at least %d chars in length", minLength),
}, false
}
func toNotificationDuration(dur time.Duration) *notification.Duration {
d, _ := notification.FromTimeDuration(dur)
return &d
}
func durToStr(dur time.Duration) string {
if dur == 0 {
return ""
}
return dur.String()
}
// flt64Ptr returns a pointer to f when it is non-zero; a zero value
// maps to nil so optional numeric fields can be omitted.
func flt64Ptr(f float64) *float64 {
	if f == 0 {
		return nil
	}
	return &f
}
// intPtr returns a pointer to a copy of i.
func intPtr(i int) *int {
	v := i
	return &v
}

View File

@ -3088,7 +3088,7 @@ spec:
rule := rules[0]
assert.Equal(t, "rule_0", rule.Name)
assert.Equal(t, "endpoint_0", rule.EndpointName)
assert.Equal(t, "endpoint_0", rule.EndpointPkgName)
assert.Equal(t, "desc_0", rule.Description)
assert.Equal(t, (10 * time.Minute).String(), rule.Every)
assert.Equal(t, (30 * time.Second).String(), rule.Offset)
@ -3108,12 +3108,27 @@ spec:
}
assert.Equal(t, expectedTagRules, rule.TagRules)
require.Len(t, sum.Labels, 1)
require.Len(t, rule.LabelAssociations, 1)
require.Len(t, sum.Labels, 2)
require.Len(t, rule.LabelAssociations, 2)
assert.Equal(t, "label_1", rule.LabelAssociations[0].PkgName)
assert.Equal(t, "label_2", rule.LabelAssociations[1].PkgName)
})
})
t.Run("handles bad config", func(t *testing.T) {
pkgWithValidEndpint := func(resource string) string {
return fmt.Sprintf(`
apiVersion: influxdata.com/v2alpha1
kind: NotificationEndpointSlack
metadata:
name: endpoint_0
spec:
url: https://hooks.slack.com/services/bip/piddy/boppidy
---
%s
`, resource)
}
tests := []struct {
kind Kind
resErr testPkgResourceError
@ -3121,10 +3136,9 @@ spec:
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "missing name",
validationErrs: 1,
valFields: []string{fieldMetadata, fieldName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "missing name",
valFields: []string{fieldMetadata, fieldName},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
spec:
@ -3133,16 +3147,15 @@ spec:
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WARN
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "missing endpoint name",
validationErrs: 1,
valFields: []string{fieldSpec, fieldNotificationRuleEndpointName},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "missing endpoint name",
valFields: []string{fieldSpec, fieldNotificationRuleEndpointName},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3151,16 +3164,15 @@ spec:
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WARN
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "missing every",
validationErrs: 1,
valFields: []string{fieldSpec, fieldEvery},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "missing every",
valFields: []string{fieldSpec, fieldEvery},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3169,52 +3181,49 @@ spec:
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WARN
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "missing status rules",
validationErrs: 1,
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "missing status rules",
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
spec:
every: 10m
endpointName: 10m
endpointName: endpoint_0
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "bad current status rule level",
validationErrs: 1,
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "bad current status rule level",
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
spec:
every: 10m
endpointName: 10m
endpointName: endpoint_0
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WRONGO
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "bad previous status rule level",
validationErrs: 1,
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "bad previous status rule level",
valFields: []string{fieldSpec, fieldNotificationRuleStatusRules},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3225,16 +3234,15 @@ spec:
statusRules:
- currentLevel: CRIT
previousLevel: WRONG
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "bad tag rule operator",
validationErrs: 1,
valFields: []string{fieldSpec, fieldNotificationRuleTagRules},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "bad tag rule operator",
valFields: []string{fieldSpec, fieldNotificationRuleTagRules},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3248,16 +3256,15 @@ spec:
- key: k1
value: v2
operator: WRONG
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "bad status provided",
validationErrs: 1,
valFields: []string{fieldSpec, fieldStatus},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "bad status provided",
valFields: []string{fieldSpec, fieldStatus},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3268,16 +3275,15 @@ spec:
status: RANDO STATUS
statusRules:
- currentLevel: WARN
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "label association does not exist",
validationErrs: 1,
valFields: []string{fieldSpec, fieldAssociations},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "label association does not exist",
valFields: []string{fieldSpec, fieldAssociations},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
@ -3290,16 +3296,15 @@ spec:
associations:
- kind: Label
name: label_1
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "label association dupe",
validationErrs: 1,
valFields: []string{fieldSpec, fieldAssociations},
pkgStr: `apiVersion: influxdata.com/v2alpha1
name: "label association dupe",
valFields: []string{fieldSpec, fieldAssociations},
pkgStr: pkgWithValidEndpint(`apiVersion: influxdata.com/v2alpha1
kind: Label
metadata:
name: label_1
@ -3319,16 +3324,15 @@ spec:
name: label_1
- kind: Label
name: label_1
`,
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "duplicate meta names",
validationErrs: 1,
valFields: []string{fieldMetadata, fieldName},
pkgStr: `
name: "duplicate meta names",
valFields: []string{fieldMetadata, fieldName},
pkgStr: pkgWithValidEndpint(`
apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
@ -3350,6 +3354,25 @@ spec:
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WARN
`),
},
},
{
kind: KindNotificationRule,
resErr: testPkgResourceError{
name: "missing endpoint association in pkg",
valFields: []string{fieldSpec, fieldNotificationRuleEndpointName},
pkgStr: `
apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_0
spec:
endpointName: RANDO_ENDPOINT_NAME
every: 10m
messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }"
statusRules:
- currentLevel: WARN
`,
},
},
@ -3932,7 +3955,7 @@ spec:
require.Len(t, sum.NotificationRules, 1)
assert.Equal(t, "$rule-1-name-ref", sum.NotificationRules[0].Name)
assert.Equal(t, "$endpoint-1-name-ref", sum.NotificationRules[0].EndpointName)
assert.Equal(t, "$endpoint-1-name-ref", sum.NotificationRules[0].EndpointPkgName)
hasEnv(t, pkg.mEnv, "rule-1-name-ref")
require.Len(t, sum.Tasks, 1)

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"net/url"
"sort"
"strings"
"sync"
"time"
@ -636,11 +635,14 @@ func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []struct {
// for later calls to Apply. This func will be run on an Apply if it has not been run
// already.
func (s *Service) DryRun(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, error) {
sum, diff, _, err := s.dryRun(ctx, orgID, pkg, opts...)
return sum, diff, err
state, err := s.dryRun(ctx, orgID, pkg, opts...)
if err != nil {
return Summary{}, Diff{}, err
}
return newSummaryFromStatePkg(state, pkg), state.diff(), nil
}
func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (Summary, Diff, *stateCoordinator, error) {
func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (*stateCoordinator, error) {
// so here's the deal, when we have issues with the parsing validation, we
// continue to do the diff anyhow. any resource that does not have a name
// will be skipped, and won't bleed into the dry run here. We can now return
@ -649,7 +651,7 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
if !pkg.isParsed {
err := pkg.Validate()
if err != nil && !IsParseErr(err) {
return Summary{}, Diff{}, nil, internalErr(err)
return nil, internalErr(err)
}
parseErr = err
}
@ -659,7 +661,7 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
if len(opt.EnvRefs) > 0 {
err := pkg.applyEnvRefs(opt.EnvRefs)
if err != nil && !IsParseErr(err) {
return Summary{}, Diff{}, nil, internalErr(err)
return nil, internalErr(err)
}
parseErr = err
}
@ -667,13 +669,13 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
state := newStateCoordinator(pkg)
if opt.StackID > 0 {
if err := s.addStackState(ctx, opt.StackID, pkg, state); err != nil {
return Summary{}, Diff{}, nil, internalErr(err)
if err := s.addStackState(ctx, opt.StackID, state); err != nil {
return nil, internalErr(err)
}
}
if err := s.dryRunSecrets(ctx, orgID, pkg); err != nil {
return Summary{}, Diff{}, nil, err
return nil, err
}
s.dryRunBuckets(ctx, orgID, state.mBuckets)
@ -682,41 +684,21 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
s.dryRunVariables(ctx, orgID, state.mVariables)
err := s.dryRunNotificationEndpoints(ctx, orgID, state.mEndpoints)
if err != nil {
return Summary{}, Diff{}, nil, ierrors.Wrap(err, "failed to dry run notification endpoints")
return nil, ierrors.Wrap(err, "failed to dry run notification endpoints")
}
var diff Diff
diffRules, err := s.dryRunNotificationRules(ctx, orgID, pkg)
err = s.dryRunNotificationRules(ctx, orgID, state.mRules, state.mEndpoints)
if err != nil {
return Summary{}, Diff{}, nil, err
return nil, err
}
diff.NotificationRules = diffRules
stateLabelMappings, err := s.dryRunLabelMappingsV2(ctx, state)
stateLabelMappings, err := s.dryRunLabelMappings(ctx, state)
if err != nil {
return Summary{}, Diff{}, nil, err
return nil, err
}
state.labelMappings = stateLabelMappings
stateDiff := state.diff()
diffLabelMappings, err := s.dryRunLabelMappings(ctx, pkg, state)
if err != nil {
return Summary{}, Diff{}, nil, err
}
diff.LabelMappings = append(diffLabelMappings, diffLabelMappings...)
diff.Buckets = stateDiff.Buckets
diff.Checks = stateDiff.Checks
diff.Dashboards = stateDiff.Dashboards
diff.NotificationEndpoints = stateDiff.NotificationEndpoints
diff.Labels = stateDiff.Labels
diff.Tasks = stateDiff.Tasks
diff.Telegrafs = stateDiff.Telegrafs
diff.Variables = stateDiff.Variables
diff.LabelMappings = append(stateDiff.LabelMappings, diff.LabelMappings...)
return newSummaryFromStatePkg(pkg, state), diff, state, parseErr
return state, parseErr
}
func (s *Service) dryRunBuckets(ctx context.Context, orgID influxdb.ID, bkts map[string]*stateBucket) {
@ -804,26 +786,12 @@ func (s *Service) dryRunNotificationEndpoints(ctx context.Context, orgID influxd
return nil
}
func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID, pkg *Pkg) ([]DiffNotificationRule, error) {
iEndpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
OrgID: &orgID,
})
if err != nil {
return nil, internalErr(err)
}
mExistingEndpointsByName := make(map[string]influxdb.NotificationEndpoint)
mExistingEndpointsByID := make(map[influxdb.ID]influxdb.NotificationEndpoint)
for _, e := range iEndpoints {
mExistingEndpointsByName[e.GetName()] = e
mExistingEndpointsByID[e.GetID()] = e
}
func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID, rules map[string]*stateRule, endpoints map[string]*stateEndpoint) error {
iRules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{
OrgID: &orgID,
}, influxdb.FindOptions{Limit: 100})
if err != nil {
return nil, internalErr(err)
return internalErr(err)
}
mExistingRulesByID := make(map[influxdb.ID]influxdb.NotificationRule)
@ -831,41 +799,20 @@ func (s *Service) dryRunNotificationRules(ctx context.Context, orgID influxdb.ID
mExistingRulesByID[r.GetID()] = r
}
mPkgEndpoints := make(map[string]influxdb.NotificationEndpoint)
for _, e := range pkg.mNotificationEndpoints {
influxEndpoint := e.summarize().NotificationEndpoint
mPkgEndpoints[e.PkgName()] = influxEndpoint
}
diffs := make([]DiffNotificationRule, 0)
for _, r := range pkg.notificationRules() {
e, ok := mExistingEndpointsByName[r.endpointName.String()]
for _, r := range rules {
e, ok := endpoints[r.parserRule.associatedEndpoint.PkgName()]
if !ok {
influxEndpoint, ok := mPkgEndpoints[r.endpointName.String()]
if !ok {
err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.endpointName, r.Name())
return nil, &influxdb.Error{Code: influxdb.EUnprocessableEntity, Err: err}
}
e = influxEndpoint
}
if iRule, ok := mExistingRulesByID[r.ID()]; ok {
var endpointName, endpointType string
if e, ok := mExistingRulesByID[iRule.GetEndpointID()]; ok {
endpointName = e.GetName()
endpointType = e.Type()
}
r.existing = &existingRule{
rule: iRule,
endpointName: endpointName,
endpointType: endpointType,
err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.parserRule.endpointName, r.parserRule.Name())
return &influxdb.Error{
Code: influxdb.EUnprocessableEntity,
Err: err,
}
}
diffs = append(diffs, newDiffNotificationRule(r, e))
r.associatedEndpoint = e
r.existing = mExistingRulesByID[r.ID()]
}
return diffs, nil
return nil
}
func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg) error {
@ -910,121 +857,7 @@ func (s *Service) dryRunVariables(ctx context.Context, orgID influxdb.ID, vars m
}
}
type (
labelMappingDiffFn func(labelID influxdb.ID, labelPkgName, labelName string, isNew bool)
labelMappers interface {
Association(i int) labelAssociater
Len() int
}
labelAssociater interface {
ID() influxdb.ID
Name() string
PkgName() string
Labels() []*label
ResourceType() influxdb.ResourceType
Exists() bool
}
)
func (s *Service) dryRunLabelMappings(ctx context.Context, pkg *Pkg, state *stateCoordinator) ([]DiffLabelMapping, error) {
mappers := []labelMappers{
mapperNotificationRules(pkg.notificationRules()),
}
diffs := make([]DiffLabelMapping, 0)
for _, mapper := range mappers {
for i := 0; i < mapper.Len(); i++ {
la := mapper.Association(i)
err := s.dryRunResourceLabelMapping(ctx, la, func(labelID influxdb.ID, labelPkgName, labelName string, isNew bool) {
existingLabel, ok := state.mLabels[labelName]
if !ok {
return
}
existingLabel.parserLabel.setMapping(la, !isNew)
status := StateStatusExists
if isNew {
status = StateStatusNew
}
diffs = append(diffs, DiffLabelMapping{
StateStatus: status,
ResType: la.ResourceType(),
ResID: SafeID(la.ID()),
ResPkgName: la.PkgName(),
ResName: la.Name(),
LabelID: SafeID(labelID),
LabelPkgName: labelPkgName,
LabelName: labelName,
})
})
if err != nil {
return nil, internalErr(err)
}
}
}
// sort by res type ASC, then res name ASC, then label name ASC
sort.Slice(diffs, func(i, j int) bool {
n, m := diffs[i], diffs[j]
if n.ResType < m.ResType {
return true
}
if n.ResType > m.ResType {
return false
}
if n.ResName < m.ResName {
return true
}
if n.ResName > m.ResName {
return false
}
return n.LabelName < m.LabelName
})
return diffs, nil
}
func (s *Service) dryRunResourceLabelMapping(ctx context.Context, la labelAssociater, mappingFn labelMappingDiffFn) error {
if !la.Exists() {
for _, l := range la.Labels() {
mappingFn(l.ID(), l.PkgName(), l.Name(), true)
}
return nil
}
// loop through and hit api for all labels associated with a bkt
// lookup labels in pkg, add it to the label mapping, if exists in
// the results from API, mark it exists
existingLabels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{
ResourceID: la.ID(),
ResourceType: la.ResourceType(),
})
if err != nil {
// TODO: inspect err, if its a not found error, do nothing, if any other error
// handle it better
return err
}
pkgLabels := labelSlcToMap(la.Labels())
for _, l := range existingLabels {
// if label is found in state then we track the mapping and mark it existing
// otherwise we continue on
delete(pkgLabels, l.Name)
if pkgLabel, ok := pkgLabels[l.Name]; ok {
mappingFn(l.ID, pkgLabel.PkgName(), l.Name, false)
}
}
// now we add labels that were not apart of the existing labels
for _, l := range pkgLabels {
mappingFn(l.ID(), l.PkgName(), l.Name(), true)
}
return nil
}
func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordinator) ([]stateLabelMapping, error) {
func (s *Service) dryRunLabelMappings(ctx context.Context, state *stateCoordinator) ([]stateLabelMapping, error) {
stateLabelsByResName := make(map[string]*stateLabel)
for _, l := range state.mLabels {
if IsRemoval(l.stateStatus) {
@ -1038,7 +871,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(b.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, b)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, b)
if err != nil {
return nil, err
}
@ -1049,7 +882,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(c.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, c)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, c)
if err != nil {
return nil, err
}
@ -1060,7 +893,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(d.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, d)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, d)
if err != nil {
return nil, err
}
@ -1071,7 +904,18 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(e.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, e)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, e)
if err != nil {
return nil, err
}
mappings = append(mappings, mm...)
}
for _, r := range state.mRules {
if IsRemoval(r.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, r)
if err != nil {
return nil, err
}
@ -1082,7 +926,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(t.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, t)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t)
if err != nil {
return nil, err
}
@ -1093,7 +937,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(t.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, t)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t)
if err != nil {
return nil, err
}
@ -1104,7 +948,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
if IsRemoval(v.stateStatus) {
continue
}
mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, v)
mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, v)
if err != nil {
return nil, err
}
@ -1114,7 +958,7 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
return mappings, nil
}
func (s *Service) dryRunResourceLabelMappingV2(ctx context.Context, state *stateCoordinator, stateLabelsByResName map[string]*stateLabel, associatedResource interface {
func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCoordinator, stateLabelsByResName map[string]*stateLabel, associatedResource interface {
labels() []*label
stateIdentity() stateIdentity
}) ([]stateLabelMapping, error) {
@ -1168,43 +1012,16 @@ func (s *Service) dryRunResourceLabelMappingV2(ctx context.Context, state *state
return mappings, nil
}
func (s *Service) addStackState(ctx context.Context, stackID influxdb.ID, pkg *Pkg, state *stateCoordinator) error {
func (s *Service) addStackState(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
stack, err := s.store.ReadStackByID(ctx, stackID)
if err != nil {
return ierrors.Wrap(internalErr(err), "reading stack")
}
type stateMapper interface {
Contains(kind Kind, pkgName string) bool
setObjectID(kind Kind, pkgName string, id influxdb.ID)
addObjectForRemoval(kind Kind, pkgName string, id influxdb.ID)
}
stateKinds := []Kind{
KindBucket,
KindCheck,
KindLabel,
KindNotificationEndpoint,
KindTask,
KindVariable,
}
// check resource exists in pkg
// if exists
// set id on existing pkg resource
// else
// add stub pkg resource that indicates it should be deleted
for _, r := range stack.Resources {
var mapper stateMapper = pkg
if r.Kind.is(stateKinds...) {
// hack for time being while we transition state out of pkg.
// this will take several passes to finish up.
mapper = state
}
updateFn := mapper.setObjectID
if !mapper.Contains(r.Kind, r.Name) {
updateFn = mapper.addObjectForRemoval
updateFn := state.setObjectID
if !state.Contains(r.Kind, r.Name) {
updateFn = state.addObjectForRemoval
}
updateFn(r.Kind, r.Name, r.ID)
}
@ -1267,7 +1084,7 @@ func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg
return Summary{}, Diff{}, failedValidationErr(err)
}
_, diff, state, err := s.dryRun(ctx, orgID, pkg, opts...)
state, err := s.dryRun(ctx, orgID, pkg, opts...)
if err != nil {
return Summary{}, Diff{}, err
}
@ -1281,7 +1098,7 @@ func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg
if e != nil {
updateStackFn = s.updateStackAfterRollback
}
if err := updateStackFn(ctx, stackID, pkg, state); err != nil {
if err := updateStackFn(ctx, stackID, state); err != nil {
s.log.Error("failed to update stack", zap.Error(err))
}
}(opt.StackID)
@ -1289,15 +1106,17 @@ func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg
coordinator := &rollbackCoordinator{sem: make(chan struct{}, s.applyReqLimit)}
defer coordinator.rollback(s.log, &e, orgID)
sum, err = s.apply(ctx, coordinator, orgID, userID, pkg, state, opt.MissingSecrets)
err = s.applyState(ctx, coordinator, orgID, userID, state, opt.MissingSecrets)
if err != nil {
return Summary{}, Diff{}, err
}
return sum, diff, err
pkg.applySecrets(opt.MissingSecrets)
return newSummaryFromStatePkg(state, pkg), state.diff(), err
}
func (s *Service) apply(ctx context.Context, coordinator *rollbackCoordinator, orgID, userID influxdb.ID, pkg *Pkg, state *stateCoordinator, missingSecrets map[string]string) (sum Summary, e error) {
func (s *Service) applyState(ctx context.Context, coordinator *rollbackCoordinator, orgID, userID influxdb.ID, state *stateCoordinator, missingSecrets map[string]string) (e error) {
// each grouping here runs for its entirety, then returns an error that
// is indicative of running all appliers provided. For instance, the labels
// may have 1 variable fail and one of the buckets fails. The errors aggregate so
@ -1331,33 +1150,28 @@ func (s *Service) apply(ctx context.Context, coordinator *rollbackCoordinator, o
for _, group := range appliers {
if err := coordinator.runTilEnd(ctx, orgID, userID, group...); err != nil {
return Summary{}, internalErr(err)
return internalErr(err)
}
}
// this has to be run after the above primary resources, because it relies on
// notification endpoints already being applied.
app, err := s.applyNotificationRulesGenerator(ctx, orgID, pkg, state.endpoints())
app, err := s.applyNotificationRulesGenerator(state.rules(), state.mEndpoints)
if err != nil {
return Summary{}, err
return err
}
if err := coordinator.runTilEnd(ctx, orgID, userID, app); err != nil {
return Summary{}, err
return err
}
// secondary resources
// this last grouping relies on the above 2 steps having completely successfully
secondary := []applier{
s.applyLabelMappings(pkg.labelMappings()),
s.applyLabelMappingsV2(state.labelMappings),
}
secondary := []applier{s.applyLabelMappings(state.labelMappings)}
if err := coordinator.runTilEnd(ctx, orgID, userID, secondary...); err != nil {
return Summary{}, internalErr(err)
return internalErr(err)
}
pkg.applySecrets(missingSecrets)
return newSummaryFromStatePkg(pkg, state), nil
return nil
}
func (s *Service) applyBuckets(ctx context.Context, buckets []*stateBucket) applier {
@ -1688,7 +1502,6 @@ func (s *Service) applyLabels(ctx context.Context, labels []*stateLabel) applier
mutex.Do(func() {
labels[i].id = influxLabel.ID
labels[i].parserLabel.id = influxLabel.ID
rollBackLabels = append(rollBackLabels, labels[i])
})
@ -1891,56 +1704,21 @@ func (s *Service) rollbackNotificationEndpoints(ctx context.Context, userID infl
return nil
}
func (s *Service) applyNotificationRulesGenerator(ctx context.Context, orgID influxdb.ID, pkg *Pkg, stateEndpoints []*stateEndpoint) (applier, error) {
endpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{
OrgID: &orgID,
})
if err != nil {
return applier{}, internalErr(err)
}
type mVal struct {
id influxdb.ID
eType string
}
mEndpointsByPkgName := make(map[string]mVal)
for _, e := range endpoints {
mEndpointsByPkgName[e.GetName()] = mVal{
id: e.GetID(),
eType: e.Type(),
}
}
for _, e := range stateEndpoints {
if IsRemoval(e.stateStatus) {
continue
}
if _, ok := mEndpointsByPkgName[e.parserEndpoint.PkgName()]; ok {
continue
}
mEndpointsByPkgName[e.parserEndpoint.PkgName()] = mVal{
id: e.ID(),
eType: e.summarize().NotificationEndpoint.Type(),
}
}
rules := pkg.notificationRules()
func (s *Service) applyNotificationRulesGenerator(rules []*stateRule, stateEndpoints map[string]*stateEndpoint) (applier, error) {
var errs applyErrs
for _, r := range rules {
v, ok := mEndpointsByPkgName[r.endpointName.String()]
v, ok := stateEndpoints[r.parserRule.associatedEndpoint.PkgName()]
if !ok {
errs = append(errs, &applyErrBody{
name: r.Name(),
msg: fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.endpointName),
name: r.parserRule.Name(),
msg: fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.parserRule.associatedEndpoint.PkgName()),
})
continue
}
r.endpointID = v.id
r.endpointType = v.eType
r.associatedEndpoint = v
}
err = errs.toError("notification_rules", "failed to find dependency")
err := errs.toError("notification_rules", "failed to find dependency")
if err != nil {
return applier{}, err
}
@ -1948,23 +1726,23 @@ func (s *Service) applyNotificationRulesGenerator(ctx context.Context, orgID inf
return s.applyNotificationRules(rules), nil
}
func (s *Service) applyNotificationRules(rules []*notificationRule) applier {
func (s *Service) applyNotificationRules(rules []*stateRule) applier {
const resource = "notification_rules"
mutex := new(doMutex)
rollbackEndpoints := make([]*notificationRule, 0, len(rules))
rollbackEndpoints := make([]*stateRule, 0, len(rules))
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
var rule notificationRule
var rule *stateRule
mutex.Do(func() {
rules[i].orgID = orgID
rule = *rules[i]
rule = rules[i]
})
influxRule, err := s.applyNotificationRule(ctx, rule, userID)
if err != nil {
return &applyErrBody{
name: rule.Name(),
name: rule.parserRule.PkgName(),
msg: err.Error(),
}
}
@ -1991,20 +1769,20 @@ func (s *Service) applyNotificationRules(rules []*notificationRule) applier {
}
}
func (s *Service) applyNotificationRule(ctx context.Context, e notificationRule, userID influxdb.ID) (influxdb.NotificationRule, error) {
actual := influxdb.NotificationRuleCreate{
NotificationRule: e.toInfluxRule(),
Status: e.Status(),
func (s *Service) applyNotificationRule(ctx context.Context, r *stateRule, userID influxdb.ID) (influxdb.NotificationRule, error) {
influxRule := influxdb.NotificationRuleCreate{
NotificationRule: r.toInfluxRule(),
Status: r.parserRule.Status(),
}
err := s.ruleSVC.CreateNotificationRule(ctx, actual, userID)
err := s.ruleSVC.CreateNotificationRule(ctx, influxRule, userID)
if err != nil {
return nil, err
}
return actual, nil
return influxRule, nil
}
func (s *Service) rollbackNotificationRules(rules []*notificationRule) error {
func (s *Service) rollbackNotificationRules(rules []*stateRule) error {
var errs []string
for _, e := range rules {
err := s.ruleSVC.DeleteNotificationRule(context.Background(), e.ID())
@ -2265,7 +2043,7 @@ func (s *Service) applyVariable(ctx context.Context, v *stateVariable) (influxdb
}
}
func (s *Service) applyLabelMappingsV2(labelMappings []stateLabelMapping) applier {
func (s *Service) applyLabelMappings(labelMappings []stateLabelMapping) applier {
const resource = "label_mapping"
mutex := new(doMutex)
@ -2315,12 +2093,12 @@ func (s *Service) applyLabelMappingsV2(labelMappings []stateLabelMapping) applie
},
rollbacker: rollbacker{
resource: resource,
fn: func(_ influxdb.ID) error { return s.rollbackLabelMappingsV2(rollbackMappings) },
fn: func(_ influxdb.ID) error { return s.rollbackLabelMappings(rollbackMappings) },
},
}
}
func (s *Service) rollbackLabelMappingsV2(mappings []stateLabelMapping) error {
func (s *Service) rollbackLabelMappings(mappings []stateLabelMapping) error {
var errs []string
for _, stateMapping := range mappings {
influxMapping := stateLabelMappingToInfluxLabelMapping(stateMapping)
@ -2337,77 +2115,6 @@ func (s *Service) rollbackLabelMappingsV2(mappings []stateLabelMapping) error {
return nil
}
func (s *Service) applyLabelMappings(labelMappings []SummaryLabelMapping) applier {
const resource = "label_mapping"
mutex := new(doMutex)
rollbackMappings := make([]influxdb.LabelMapping, 0, len(labelMappings))
createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
var mapping SummaryLabelMapping
mutex.Do(func() {
mapping = labelMappings[i]
})
if IsExisting(mapping.Status) || mapping.LabelID == 0 || mapping.ResourceID == 0 {
// this block here does 2 things, it does not write a
// mapping when one exists. it also avoids having to worry
// about deleting an existing mapping since it will not be
// passed to the delete function below b/c it is never added
// to the list of mappings that is referenced in the delete
// call.
return nil
}
m := influxdb.LabelMapping{
LabelID: influxdb.ID(mapping.LabelID),
ResourceID: influxdb.ID(mapping.ResourceID),
ResourceType: mapping.ResourceType,
}
err := s.labelSVC.CreateLabelMapping(ctx, &m)
if err != nil {
return &applyErrBody{
name: fmt.Sprintf("%s:%s:%s", mapping.ResourceType, mapping.ResourceID, mapping.LabelID),
msg: err.Error(),
}
}
mutex.Do(func() {
rollbackMappings = append(rollbackMappings, m)
})
return nil
}
return applier{
creater: creater{
entries: len(labelMappings),
fn: createFn,
},
rollbacker: rollbacker{
resource: resource,
fn: func(_ influxdb.ID) error { return s.rollbackLabelMappings(rollbackMappings) },
},
}
}
func (s *Service) rollbackLabelMappings(mappings []influxdb.LabelMapping) error {
var errs []string
for i := range mappings {
l := mappings[i]
err := s.labelSVC.DeleteLabelMapping(context.Background(), &l)
if err != nil {
errs = append(errs, fmt.Sprintf("%s:%s", l.LabelID.String(), l.ResourceID.String()))
}
}
if len(errs) > 0 {
return fmt.Errorf(`label_resource_id_pairs=[%s] err="unable to delete label"`, strings.Join(errs, ", "))
}
return nil
}
func (s *Service) deleteByIDs(resource string, numIDs int, deleteFn func(context.Context, influxdb.ID) error, iterFn func(int) influxdb.ID) error {
var errs []string
for i := range make([]struct{}, numIDs) {
@ -2425,7 +2132,7 @@ func (s *Service) deleteByIDs(resource string, numIDs int, deleteFn func(context
return nil
}
func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.ID, pkg *Pkg, state *stateCoordinator) error {
func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
stack, err := s.store.ReadStackByID(ctx, stackID)
if err != nil {
return err
@ -2493,7 +2200,7 @@ func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID influxdb.
return s.store.UpdateStack(ctx, stack)
}
func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb.ID, pkg *Pkg, state *stateCoordinator) error {
func (s *Service) updateStackAfterRollback(ctx context.Context, stackID influxdb.ID, state *stateCoordinator) error {
stack, err := s.store.ReadStackByID(ctx, stackID)
if err != nil {
return err
@ -2607,40 +2314,11 @@ func (s *Service) getAllPlatformVariables(ctx context.Context, orgID influxdb.ID
return existingVars, nil
}
// temporary hack while integrations are needed.
func newSummaryFromStatePkg(pkg *Pkg, state *stateCoordinator) Summary {
func newSummaryFromStatePkg(state *stateCoordinator, pkg *Pkg) Summary {
stateSum := state.summary()
pkgSum := pkg.Summary()
pkgSum.Buckets = stateSum.Buckets
pkgSum.Checks = stateSum.Checks
pkgSum.Dashboards = stateSum.Dashboards
pkgSum.NotificationEndpoints = stateSum.NotificationEndpoints
pkgSum.Labels = stateSum.Labels
pkgSum.Tasks = stateSum.Tasks
pkgSum.TelegrafConfigs = stateSum.TelegrafConfigs
pkgSum.Variables = stateSum.Variables
// filter out label mappings that are from pgk and replace with those
// in state. This is temporary hack to provide a bridge to the promise land...
resourcesToSkip := map[influxdb.ResourceType]bool{
influxdb.BucketsResourceType: true,
influxdb.ChecksResourceType: true,
influxdb.DashboardsResourceType: true,
influxdb.NotificationEndpointResourceType: true,
influxdb.TasksResourceType: true,
influxdb.TelegrafsResourceType: true,
influxdb.VariablesResourceType: true,
}
for _, lm := range pkgSum.LabelMappings {
if resourcesToSkip[lm.ResourceType] {
continue
}
stateSum.LabelMappings = append(stateSum.LabelMappings, lm)
}
pkgSum.LabelMappings = stateSum.LabelMappings
return pkgSum
stateSum.MissingEnvs = pkg.missingEnvRefs()
stateSum.MissingSecrets = pkg.missingSecrets()
return stateSum
}
func getLabelIDMap(ctx context.Context, labelSVC influxdb.LabelService, labelNames []string) (map[influxdb.ID]bool, error) {

View File

@ -5,6 +5,7 @@ import (
"sort"
"github.com/influxdata/influxdb/v2"
"github.com/influxdata/influxdb/v2/notification/rule"
)
type stateCoordinator struct {
@ -13,6 +14,7 @@ type stateCoordinator struct {
mDashboards map[string]*stateDashboard
mEndpoints map[string]*stateEndpoint
mLabels map[string]*stateLabel
mRules map[string]*stateRule
mTasks map[string]*stateTask
mTelegrafs map[string]*stateTelegraf
mVariables map[string]*stateVariable
@ -27,6 +29,7 @@ func newStateCoordinator(pkg *Pkg) *stateCoordinator {
mDashboards: make(map[string]*stateDashboard),
mEndpoints: make(map[string]*stateEndpoint),
mLabels: make(map[string]*stateLabel),
mRules: make(map[string]*stateRule),
mTasks: make(map[string]*stateTask),
mTelegrafs: make(map[string]*stateTelegraf),
mVariables: make(map[string]*stateVariable),
@ -62,6 +65,12 @@ func newStateCoordinator(pkg *Pkg) *stateCoordinator {
stateStatus: StateStatusNew,
}
}
for _, pkgRule := range pkg.notificationRules() {
state.mRules[pkgRule.PkgName()] = &stateRule{
parserRule: pkgRule,
stateStatus: StateStatusNew,
}
}
for _, pkgTask := range pkg.tasks() {
state.mTasks[pkgTask.PkgName()] = &stateTask{
parserTask: pkgTask,
@ -124,6 +133,14 @@ func (s *stateCoordinator) labels() []*stateLabel {
return out
}
func (s *stateCoordinator) rules() []*stateRule {
out := make([]*stateRule, 0, len(s.mRules))
for _, r := range s.mRules {
out = append(out, r)
}
return out
}
func (s *stateCoordinator) tasks() []*stateTask {
out := make([]*stateTask, 0, len(s.mTasks))
for _, t := range s.mTasks {
@ -185,6 +202,13 @@ func (s *stateCoordinator) diff() Diff {
return diff.Labels[i].PkgName < diff.Labels[j].PkgName
})
for _, r := range s.mRules {
diff.NotificationRules = append(diff.NotificationRules, r.diffRule())
}
sort.Slice(diff.NotificationRules, func(i, j int) bool {
return diff.NotificationRules[i].PkgName < diff.NotificationRules[j].PkgName
})
for _, t := range s.mTasks {
diff.Tasks = append(diff.Tasks, t.diffTask())
}
@ -281,6 +305,16 @@ func (s *stateCoordinator) summary() Summary {
return sum.Labels[i].PkgName < sum.Labels[j].PkgName
})
for _, v := range s.mRules {
if IsRemoval(v.stateStatus) {
continue
}
sum.NotificationRules = append(sum.NotificationRules, v.summarize())
}
sort.Slice(sum.NotificationRules, func(i, j int) bool {
return sum.NotificationRules[i].PkgName < sum.NotificationRules[j].PkgName
})
for _, t := range s.mTasks {
if IsRemoval(t.stateStatus) {
continue
@ -383,6 +417,12 @@ func (s *stateCoordinator) addObjectForRemoval(k Kind, pkgName string, id influx
parserEndpoint: &notificationEndpoint{identity: newIdentity},
stateStatus: StateStatusRemove,
}
case KindNotificationRule:
s.mRules[pkgName] = &stateRule{
id: id,
parserRule: &notificationRule{identity: newIdentity},
stateStatus: StateStatusRemove,
}
case KindTask:
s.mTasks[pkgName] = &stateTask{
id: id,
@ -433,6 +473,12 @@ func (s *stateCoordinator) getObjectIDSetter(k Kind, pkgName string) (func(influ
r.id = id
r.stateStatus = StateStatusExists
}, ok
case KindNotificationRule:
r, ok := s.mRules[pkgName]
return func(id influxdb.ID) {
r.id = id
r.stateStatus = StateStatusExists
}, ok
case KindTask:
r, ok := s.mTasks[pkgName]
return func(id influxdb.ID) {
@ -883,6 +929,140 @@ func (e *stateEndpoint) summarize() SummaryNotificationEndpoint {
return sum
}
type stateRule struct {
id, orgID influxdb.ID
stateStatus StateStatus
associatedEndpoint *stateEndpoint
parserRule *notificationRule
existing influxdb.NotificationRule
}
func (r *stateRule) ID() influxdb.ID {
if !IsNew(r.stateStatus) && r.existing != nil {
return r.existing.GetID()
}
return r.id
}
func (r *stateRule) diffRule() DiffNotificationRule {
sum := DiffNotificationRule{
DiffIdentifier: DiffIdentifier{
ID: SafeID(r.ID()),
Remove: r.parserRule.shouldRemove,
PkgName: r.parserRule.PkgName(),
},
New: DiffNotificationRuleValues{
Name: r.parserRule.Name(),
Description: r.parserRule.description,
EndpointName: r.associatedEndpoint.parserEndpoint.Name(),
EndpointID: SafeID(r.associatedEndpoint.ID()),
EndpointType: r.associatedEndpoint.parserEndpoint.kind.String(),
Every: r.parserRule.every.String(),
Offset: r.parserRule.offset.String(),
MessageTemplate: r.parserRule.msgTemplate,
StatusRules: toSummaryStatusRules(r.parserRule.statusRules),
TagRules: toSummaryTagRules(r.parserRule.tagRules),
},
}
if r.existing == nil {
return sum
}
sum.Old = &DiffNotificationRuleValues{
Name: r.existing.GetName(),
Description: r.existing.GetDescription(),
EndpointName: r.existing.GetName(),
EndpointID: SafeID(r.existing.GetEndpointID()),
EndpointType: r.existing.Type(),
}
assignBase := func(b rule.Base) {
if b.Every != nil {
sum.Old.Every = b.Every.TimeDuration().String()
}
if b.Offset != nil {
sum.Old.Offset = b.Offset.TimeDuration().String()
}
for _, tr := range b.TagRules {
sum.Old.TagRules = append(sum.Old.TagRules, SummaryTagRule{
Key: tr.Key,
Value: tr.Value,
Operator: tr.Operator.String(),
})
}
for _, sr := range b.StatusRules {
sRule := SummaryStatusRule{CurrentLevel: sr.CurrentLevel.String()}
if sr.PreviousLevel != nil {
sRule.PreviousLevel = sr.PreviousLevel.String()
}
sum.Old.StatusRules = append(sum.Old.StatusRules, sRule)
}
}
switch p := r.existing.(type) {
case *rule.HTTP:
assignBase(p.Base)
case *rule.Slack:
assignBase(p.Base)
sum.Old.MessageTemplate = p.MessageTemplate
case *rule.PagerDuty:
assignBase(p.Base)
sum.Old.MessageTemplate = p.MessageTemplate
}
return sum
}
func (r *stateRule) labels() []*label {
return r.parserRule.labels
}
func (r *stateRule) resourceType() influxdb.ResourceType {
return KindNotificationRule.ResourceType()
}
func (r *stateRule) stateIdentity() stateIdentity {
return stateIdentity{
id: r.ID(),
name: r.parserRule.Name(),
pkgName: r.parserRule.PkgName(),
resourceType: r.resourceType(),
stateStatus: r.stateStatus,
}
}
func (r *stateRule) summarize() SummaryNotificationRule {
sum := r.parserRule.summarize()
sum.ID = SafeID(r.id)
sum.EndpointID = SafeID(r.associatedEndpoint.ID())
sum.EndpointPkgName = r.associatedEndpoint.parserEndpoint.PkgName()
sum.EndpointType = r.associatedEndpoint.parserEndpoint.kind.String()
return sum
}
func (r *stateRule) toInfluxRule() influxdb.NotificationRule {
influxRule := r.parserRule.toInfluxRule()
if r.ID() > 0 {
influxRule.SetID(r.ID())
}
if r.orgID > 0 {
influxRule.SetOrgID(r.orgID)
}
switch e := influxRule.(type) {
case *rule.HTTP:
e.EndpointID = r.associatedEndpoint.ID()
case *rule.PagerDuty:
e.EndpointID = r.associatedEndpoint.ID()
case *rule.Slack:
e.EndpointID = r.associatedEndpoint.ID()
}
return influxRule
}
type stateTask struct {
id, orgID influxdb.ID
stateStatus StateStatus

View File

@ -356,7 +356,7 @@ func TestService(t *testing.T) {
actual := diff.NotificationRules[0].New
assert.Equal(t, "rule_0", actual.Name)
assert.Equal(t, "desc_0", actual.Description)
assert.Equal(t, "http", actual.EndpointType)
assert.Equal(t, "slack", actual.EndpointType)
assert.Equal(t, existing.Name, actual.EndpointName)
assert.Equal(t, SafeID(*existing.ID), actual.EndpointID)
assert.Equal(t, (10 * time.Minute).String(), actual.Every)
@ -374,15 +374,6 @@ func TestService(t *testing.T) {
}
assert.Equal(t, expectedTagRules, actual.TagRules)
})
t.Run("should error if endpoint name is not in pkg or in platform", func(t *testing.T) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
svc := newTestService()
_, _, err := svc.DryRun(context.TODO(), influxdb.ID(100), 0, pkg)
require.Error(t, err)
})
})
})
t.Run("secrets not returns missing secrets", func(t *testing.T) {
@ -944,91 +935,6 @@ func TestService(t *testing.T) {
})
}
testLabelMappingFn := func(t *testing.T, filename string, numExpected int, settersFn func() []ServiceSetterFn) {
t.Helper()
t.Run("applies successfully", func(t *testing.T) {
t.Helper()
testfileRunner(t, filename, func(t *testing.T, pkg *Pkg) {
t.Helper()
fakeLabelSVC := mock.NewLabelService()
fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error {
l.ID = influxdb.ID(rand.Int())
return nil
}
fakeLabelSVC.CreateLabelMappingFn = func(_ context.Context, mapping *influxdb.LabelMapping) error {
if mapping.ResourceID == 0 {
return errors.New("did not get a resource ID")
}
if mapping.ResourceType == "" {
return errors.New("did not get a resource type")
}
return nil
}
svc := newTestService(append(settersFn(),
WithLabelSVC(fakeLabelSVC),
WithLogger(zaptest.NewLogger(t)),
)...)
orgID := influxdb.ID(9000)
_, _, err := svc.Apply(context.TODO(), orgID, 0, pkg)
require.NoError(t, err)
assert.Equal(t, numExpected, fakeLabelSVC.CreateLabelMappingCalls.Count())
})
})
t.Run("deletes new label mappings on error", func(t *testing.T) {
t.Helper()
testfileRunner(t, filename, func(t *testing.T, pkg *Pkg) {
t.Helper()
for _, l := range pkg.mLabels {
for resource, vals := range l.mappings {
// create extra label mappings, enough for delete to ahve head room
l.mappings[resource] = append(l.mappings[resource], vals...)
l.mappings[resource] = append(l.mappings[resource], vals...)
l.mappings[resource] = append(l.mappings[resource], vals...)
}
}
fakeLabelSVC := mock.NewLabelService()
fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error {
l.ID = influxdb.ID(fakeLabelSVC.CreateLabelCalls.Count() + 1)
return nil
}
fakeLabelSVC.DeleteLabelMappingFn = func(_ context.Context, m *influxdb.LabelMapping) error {
return nil
}
fakeLabelSVC.CreateLabelMappingFn = func(_ context.Context, mapping *influxdb.LabelMapping) error {
if mapping.ResourceID == 0 {
return errors.New("did not get a resource ID")
}
if mapping.ResourceType == "" {
return errors.New("did not get a resource type")
}
if fakeLabelSVC.CreateLabelMappingCalls.Count() > numExpected {
return errors.New("hit last label")
}
return nil
}
svc := newTestService(append(settersFn(),
WithLabelSVC(fakeLabelSVC),
WithLogger(zaptest.NewLogger(t)),
)...)
orgID := influxdb.ID(9000)
_, _, err := svc.Apply(context.TODO(), orgID, 0, pkg)
require.Error(t, err)
assert.GreaterOrEqual(t, fakeLabelSVC.DeleteLabelMappingCalls.Count(), numExpected)
})
})
}
t.Run("maps buckets with labels", func(t *testing.T) {
bktOpt := func() []ServiceSetterFn {
fakeBktSVC := mock.NewBucketService()
@ -1114,35 +1020,24 @@ func TestService(t *testing.T) {
})
t.Run("maps notification rules with labels", func(t *testing.T) {
testLabelMappingFn(
t,
"testdata/notification_rule.yml",
1,
func() []ServiceSetterFn {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
id := influxdb.ID(9)
return []influxdb.NotificationEndpoint{
&endpoint.HTTP{
Base: endpoint.Base{
ID: &id,
Name: "endpoint_0",
},
AuthMethod: "none",
},
}, 1, nil
}
fakeRuleStore := mock.NewNotificationRuleStore()
fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1))
return nil
}
return []ServiceSetterFn{
WithNotificationEndpointSVC(fakeEndpointSVC),
WithNotificationRuleSVC(fakeRuleStore),
}
},
)
opts := func() []ServiceSetterFn {
fakeRuleStore := mock.NewNotificationRuleStore()
fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1))
return nil
}
return []ServiceSetterFn{
WithNotificationRuleSVC(fakeRuleStore),
}
}
t.Run("applies successfully", func(t *testing.T) {
testLabelMappingV2ApplyFn(t, "testdata/notification_rule.yml", 2, opts)
})
t.Run("deletes new label mappings on error", func(t *testing.T) {
testLabelMappingV2RollbackFn(t, "testdata/notification_rule.yml", 1, opts)
})
})
t.Run("maps tasks with labels", func(t *testing.T) {
@ -1289,16 +1184,9 @@ func TestService(t *testing.T) {
t.Run("successfuly creates", func(t *testing.T) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
id := influxdb.ID(9)
return []influxdb.NotificationEndpoint{
&endpoint.HTTP{
Base: endpoint.Base{
ID: &id,
Name: "endpoint_0",
},
},
}, 1, nil
fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID influxdb.ID) error {
nr.SetID(influxdb.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1))
return nil
}
fakeRuleStore := mock.NewNotificationRuleStore()
fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error {
@ -1319,33 +1207,17 @@ func TestService(t *testing.T) {
require.Len(t, sum.NotificationRules, 1)
assert.Equal(t, "rule_0", sum.NotificationRules[0].Name)
assert.Equal(t, "desc_0", sum.NotificationRules[0].Description)
assert.Equal(t, SafeID(9), sum.NotificationRules[0].EndpointID)
assert.Equal(t, "endpoint_0", sum.NotificationRules[0].EndpointName)
assert.Equal(t, "http", sum.NotificationRules[0].EndpointType)
assert.Equal(t, SafeID(1), sum.NotificationRules[0].EndpointID)
assert.Equal(t, "endpoint_0", sum.NotificationRules[0].EndpointPkgName)
assert.Equal(t, "slack", sum.NotificationRules[0].EndpointType)
})
})
t.Run("rolls back all created notification rules on an error", func(t *testing.T) {
testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, pkg *Pkg) {
fakeEndpointSVC := mock.NewNotificationEndpointService()
fakeEndpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
id := influxdb.ID(9)
return []influxdb.NotificationEndpoint{
&endpoint.HTTP{
Base: endpoint.Base{
ID: &id,
Name: "endpoint_0",
},
AuthMethod: "none",
},
}, 1, nil
}
fakeRuleStore := mock.NewNotificationRuleStore()
fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID influxdb.ID) error {
if fakeRuleStore.CreateNotificationRuleCalls.Count() == 1 {
return errors.New("limit hit")
}
nr.SetID(1)
nr.SetID(influxdb.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1))
return nil
}
fakeRuleStore.DeleteNotificationRuleF = func(ctx context.Context, id influxdb.ID) error {
@ -1354,11 +1226,17 @@ func TestService(t *testing.T) {
}
return nil
}
pkg.mNotificationRules["rule_UUID_copy"] = pkg.mNotificationRules["rule_UUID"]
fakeLabelSVC := mock.NewLabelService()
fakeLabelSVC.CreateLabelFn = func(ctx context.Context, l *influxdb.Label) error {
l.ID = influxdb.ID(fakeLabelSVC.CreateLabelCalls.Count() + 1)
return nil
}
fakeLabelSVC.CreateLabelMappingFn = func(ctx context.Context, m *influxdb.LabelMapping) error {
return errors.New("start the rollack")
}
svc := newTestService(
WithNotificationEndpointSVC(fakeEndpointSVC),
WithLabelSVC(fakeLabelSVC),
WithNotificationRuleSVC(fakeRuleStore),
)
@ -2527,8 +2405,8 @@ func TestService(t *testing.T) {
actualRule := sum.NotificationRules[0]
assert.Zero(t, actualRule.ID)
assert.Zero(t, actualRule.EndpointID)
assert.Zero(t, actualRule.EndpointType)
assert.NotEmpty(t, actualRule.EndpointName)
assert.NotEmpty(t, actualRule.EndpointType)
assert.NotEmpty(t, actualRule.EndpointPkgName)
baseEqual := func(t *testing.T, base rule.Base) {
t.Helper()
@ -2625,7 +2503,7 @@ func TestService(t *testing.T) {
sum := newPkg.Summary()
require.Len(t, sum.NotificationRules, len(resourcesToClone))
expectedSameEndpointName := sum.NotificationRules[0].EndpointName
expectedSameEndpointName := sum.NotificationRules[0].EndpointPkgName
assert.NotZero(t, expectedSameEndpointName)
assert.NotEqual(t, "endpoint_0", expectedSameEndpointName)
@ -2633,7 +2511,7 @@ func TestService(t *testing.T) {
actual := sum.NotificationRules[i]
assert.Equal(t, "old_name", actual.Name)
assert.Equal(t, "desc", actual.Description)
assert.Equal(t, expectedSameEndpointName, actual.EndpointName)
assert.Equal(t, expectedSameEndpointName, actual.EndpointPkgName)
}
require.Len(t, sum.NotificationEndpoints, 1)
@ -3233,7 +3111,7 @@ func TestService(t *testing.T) {
rules := summary.NotificationRules
require.Len(t, rules, 1)
assert.Equal(t, expectedRule.Name, rules[0].Name)
assert.NotEmpty(t, rules[0].EndpointName)
assert.NotEmpty(t, rules[0].EndpointPkgName)
require.Len(t, summary.Tasks, 1)
task1 := summary.Tasks[0]

View File

@ -6,6 +6,13 @@
"name": "label_1"
}
},
{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "Label",
"metadata": {
"name": "label_2"
}
},
{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "NotificationRule",
@ -46,9 +53,24 @@
{
"kind": "Label",
"name": "label_1"
},
{
"kind": "Label",
"name": "label_2"
}
]
}
},
{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "NotificationEndpointSlack",
"metadata": {
"name": "endpoint_0"
},
"spec": {
"url": "https://hooks.slack.com/services/bip/piddy/boppidy"
}
}
]

View File

@ -5,6 +5,11 @@ metadata:
name: label_1
---
apiVersion: influxdata.com/v2alpha1
kind: Label
metadata:
name: label_2
---
apiVersion: influxdata.com/v2alpha1
kind: NotificationRule
metadata:
name: rule_UUID
@ -31,3 +36,12 @@ spec:
associations:
- kind: Label
name: label_1
- kind: Label
name: label_2
---
apiVersion: influxdata.com/v2alpha1
kind: NotificationEndpointSlack
metadata:
name: endpoint_0
spec:
url: https://hooks.slack.com/services/bip/piddy/boppidy

View File

@ -45,7 +45,7 @@ func (e *Engine) MeasurementTagValues(ctx context.Context, orgID, bucketID influ
// MeasurementTagKeys returns an iterator which enumerates the tag keys for the given
// bucket and measurement, filtered using the optional the predicate and limited to the
//// time range [start, end].
// time range [start, end].
//
// MeasurementTagKeys will always return a StringIterator if there is no error.
//
@ -63,7 +63,7 @@ func (e *Engine) MeasurementTagKeys(ctx context.Context, orgID, bucketID influxd
// MeasurementFields returns an iterator which enumerates the field schema for the given
// bucket and measurement, filtered using the optional the predicate and limited to the
//// time range [start, end].
// time range [start, end].
//
// MeasurementFields will always return a MeasurementFieldsIterator if there is no error.
//

View File

@ -10,6 +10,7 @@ import (
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
@ -21,7 +22,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Node_Type int32
@ -163,7 +164,7 @@ func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Node.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@ -189,34 +190,34 @@ type isNode_Value interface {
}
type Node_StringValue struct {
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
}
type Node_BooleanValue struct {
BooleanValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
BooleanValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
}
type Node_IntegerValue struct {
IntegerValue int64 `protobuf:"varint,5,opt,name=int_value,json=intValue,proto3,oneof"`
IntegerValue int64 `protobuf:"varint,5,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"`
}
type Node_UnsignedValue struct {
UnsignedValue uint64 `protobuf:"varint,6,opt,name=uint_value,json=uintValue,proto3,oneof"`
UnsignedValue uint64 `protobuf:"varint,6,opt,name=uint_value,json=uintValue,proto3,oneof" json:"uint_value,omitempty"`
}
type Node_FloatValue struct {
FloatValue float64 `protobuf:"fixed64,7,opt,name=float_value,json=floatValue,proto3,oneof"`
FloatValue float64 `protobuf:"fixed64,7,opt,name=float_value,json=floatValue,proto3,oneof" json:"float_value,omitempty"`
}
type Node_RegexValue struct {
RegexValue string `protobuf:"bytes,8,opt,name=regex_value,json=regexValue,proto3,oneof"`
RegexValue string `protobuf:"bytes,8,opt,name=regex_value,json=regexValue,proto3,oneof" json:"regex_value,omitempty"`
}
type Node_TagRefValue struct {
TagRefValue string `protobuf:"bytes,9,opt,name=tag_ref_value,json=tagRefValue,proto3,oneof"`
TagRefValue string `protobuf:"bytes,9,opt,name=tag_ref_value,json=tagRefValue,proto3,oneof" json:"tag_ref_value,omitempty"`
}
type Node_FieldRefValue struct {
FieldRefValue string `protobuf:"bytes,10,opt,name=field_ref_value,json=fieldRefValue,proto3,oneof"`
FieldRefValue string `protobuf:"bytes,10,opt,name=field_ref_value,json=fieldRefValue,proto3,oneof" json:"field_ref_value,omitempty"`
}
type Node_Logical_ struct {
Logical Node_Logical `protobuf:"varint,11,opt,name=logical,proto3,enum=influxdata.platform.storage.Node_Logical,oneof"`
Logical Node_Logical `protobuf:"varint,11,opt,name=logical,proto3,enum=influxdata.platform.storage.Node_Logical,oneof" json:"logical,omitempty"`
}
type Node_Comparison_ struct {
Comparison Node_Comparison `protobuf:"varint,12,opt,name=comparison,proto3,enum=influxdata.platform.storage.Node_Comparison,oneof"`
Comparison Node_Comparison `protobuf:"varint,12,opt,name=comparison,proto3,enum=influxdata.platform.storage.Node_Comparison,oneof" json:"comparison,omitempty"`
}
func (*Node_StringValue) isNode_Value() {}
@ -321,9 +322,9 @@ func (m *Node) GetComparison() Node_Comparison {
return ComparisonEqual
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Node) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Node_OneofMarshaler, _Node_OneofUnmarshaler, _Node_OneofSizer, []interface{}{
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Node) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Node_StringValue)(nil),
(*Node_BooleanValue)(nil),
(*Node_IntegerValue)(nil),
@ -337,174 +338,6 @@ func (*Node) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, f
}
}
func _Node_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Node)
// value
switch x := m.Value.(type) {
case *Node_StringValue:
_ = b.EncodeVarint(3<<3 | proto.WireBytes)
_ = b.EncodeStringBytes(x.StringValue)
case *Node_BooleanValue:
t := uint64(0)
if x.BooleanValue {
t = 1
}
_ = b.EncodeVarint(4<<3 | proto.WireVarint)
_ = b.EncodeVarint(t)
case *Node_IntegerValue:
_ = b.EncodeVarint(5<<3 | proto.WireVarint)
_ = b.EncodeVarint(uint64(x.IntegerValue))
case *Node_UnsignedValue:
_ = b.EncodeVarint(6<<3 | proto.WireVarint)
_ = b.EncodeVarint(uint64(x.UnsignedValue))
case *Node_FloatValue:
_ = b.EncodeVarint(7<<3 | proto.WireFixed64)
_ = b.EncodeFixed64(math.Float64bits(x.FloatValue))
case *Node_RegexValue:
_ = b.EncodeVarint(8<<3 | proto.WireBytes)
_ = b.EncodeStringBytes(x.RegexValue)
case *Node_TagRefValue:
_ = b.EncodeVarint(9<<3 | proto.WireBytes)
_ = b.EncodeStringBytes(x.TagRefValue)
case *Node_FieldRefValue:
_ = b.EncodeVarint(10<<3 | proto.WireBytes)
_ = b.EncodeStringBytes(x.FieldRefValue)
case *Node_Logical_:
_ = b.EncodeVarint(11<<3 | proto.WireVarint)
_ = b.EncodeVarint(uint64(x.Logical))
case *Node_Comparison_:
_ = b.EncodeVarint(12<<3 | proto.WireVarint)
_ = b.EncodeVarint(uint64(x.Comparison))
case nil:
default:
return fmt.Errorf("Node.Value has unexpected type %T", x)
}
return nil
}
func _Node_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Node)
switch tag {
case 3: // value.string_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Node_StringValue{x}
return true, err
case 4: // value.bool_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Node_BooleanValue{x != 0}
return true, err
case 5: // value.int_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Node_IntegerValue{int64(x)}
return true, err
case 6: // value.uint_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Node_UnsignedValue{x}
return true, err
case 7: // value.float_value
if wire != proto.WireFixed64 {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeFixed64()
m.Value = &Node_FloatValue{math.Float64frombits(x)}
return true, err
case 8: // value.regex_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Node_RegexValue{x}
return true, err
case 9: // value.tag_ref_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Node_TagRefValue{x}
return true, err
case 10: // value.field_ref_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Node_FieldRefValue{x}
return true, err
case 11: // value.logical
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Node_Logical_{Node_Logical(x)}
return true, err
case 12: // value.comparison
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Node_Comparison_{Node_Comparison(x)}
return true, err
default:
return false, nil
}
}
func _Node_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Node)
// value
switch x := m.Value.(type) {
case *Node_StringValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.StringValue)))
n += len(x.StringValue)
case *Node_BooleanValue:
n += 1 // tag and wire
n += 1
case *Node_IntegerValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.IntegerValue))
case *Node_UnsignedValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.UnsignedValue))
case *Node_FloatValue:
n += 1 // tag and wire
n += 8
case *Node_RegexValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.RegexValue)))
n += len(x.RegexValue)
case *Node_TagRefValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.TagRefValue)))
n += len(x.TagRefValue)
case *Node_FieldRefValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.FieldRefValue)))
n += len(x.FieldRefValue)
case *Node_Logical_:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.Logical))
case *Node_Comparison_:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.Comparison))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Predicate struct {
Root *Node `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"`
}
@ -523,7 +356,7 @@ func (m *Predicate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Predicate.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
@ -621,7 +454,7 @@ var fileDescriptor_87cba9804b436f42 = []byte{
func (m *Node) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@ -629,121 +462,184 @@ func (m *Node) Marshal() (dAtA []byte, err error) {
}
func (m *Node) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.NodeType != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintPredicate(dAtA, i, uint64(m.NodeType))
}
if len(m.Children) > 0 {
for _, msg := range m.Children {
dAtA[i] = 0x12
i++
i = encodeVarintPredicate(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
if m.Value != nil {
{
size := m.Value.Size()
i -= size
if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i += n
}
}
if m.Value != nil {
nn1, err := m.Value.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
if len(m.Children) > 0 {
for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Children[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPredicate(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
i += nn1
}
return i, nil
if m.NodeType != 0 {
i = encodeVarintPredicate(dAtA, i, uint64(m.NodeType))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Node_StringValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x1a
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= len(m.StringValue)
copy(dAtA[i:], m.StringValue)
i = encodeVarintPredicate(dAtA, i, uint64(len(m.StringValue)))
i += copy(dAtA[i:], m.StringValue)
return i, nil
i--
dAtA[i] = 0x1a
return len(dAtA) - i, nil
}
func (m *Node_BooleanValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x20
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_BooleanValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i--
if m.BooleanValue {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
return i, nil
i--
dAtA[i] = 0x20
return len(dAtA) - i, nil
}
func (m *Node_IntegerValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x28
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_IntegerValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i = encodeVarintPredicate(dAtA, i, uint64(m.IntegerValue))
return i, nil
i--
dAtA[i] = 0x28
return len(dAtA) - i, nil
}
func (m *Node_UnsignedValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x30
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_UnsignedValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i = encodeVarintPredicate(dAtA, i, uint64(m.UnsignedValue))
return i, nil
i--
dAtA[i] = 0x30
return len(dAtA) - i, nil
}
func (m *Node_FloatValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x39
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.FloatValue))))
i += 8
return i, nil
i--
dAtA[i] = 0x39
return len(dAtA) - i, nil
}
func (m *Node_RegexValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x42
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_RegexValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= len(m.RegexValue)
copy(dAtA[i:], m.RegexValue)
i = encodeVarintPredicate(dAtA, i, uint64(len(m.RegexValue)))
i += copy(dAtA[i:], m.RegexValue)
return i, nil
i--
dAtA[i] = 0x42
return len(dAtA) - i, nil
}
func (m *Node_TagRefValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x4a
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_TagRefValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= len(m.TagRefValue)
copy(dAtA[i:], m.TagRefValue)
i = encodeVarintPredicate(dAtA, i, uint64(len(m.TagRefValue)))
i += copy(dAtA[i:], m.TagRefValue)
return i, nil
i--
dAtA[i] = 0x4a
return len(dAtA) - i, nil
}
func (m *Node_FieldRefValue) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x52
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_FieldRefValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= len(m.FieldRefValue)
copy(dAtA[i:], m.FieldRefValue)
i = encodeVarintPredicate(dAtA, i, uint64(len(m.FieldRefValue)))
i += copy(dAtA[i:], m.FieldRefValue)
return i, nil
i--
dAtA[i] = 0x52
return len(dAtA) - i, nil
}
func (m *Node_Logical_) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x58
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_Logical_) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i = encodeVarintPredicate(dAtA, i, uint64(m.Logical))
return i, nil
i--
dAtA[i] = 0x58
return len(dAtA) - i, nil
}
func (m *Node_Comparison_) MarshalTo(dAtA []byte) (int, error) {
i := 0
dAtA[i] = 0x60
i++
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Node_Comparison_) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i = encodeVarintPredicate(dAtA, i, uint64(m.Comparison))
return i, nil
i--
dAtA[i] = 0x60
return len(dAtA) - i, nil
}
func (m *Predicate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@ -751,31 +647,40 @@ func (m *Predicate) Marshal() (dAtA []byte, err error) {
}
func (m *Predicate) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Predicate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Root != nil {
dAtA[i] = 0xa
i++
i = encodeVarintPredicate(dAtA, i, uint64(m.Root.Size()))
n2, err := m.Root.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
{
size, err := m.Root.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintPredicate(dAtA, i, uint64(size))
}
i += n2
i--
dAtA[i] = 0xa
}
return i, nil
return len(dAtA) - i, nil
}
func encodeVarintPredicate(dAtA []byte, offset int, v uint64) int {
offset -= sovPredicate(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
return base
}
func (m *Node) Size() (n int) {
if m == nil {
@ -906,14 +811,7 @@ func (m *Predicate) Size() (n int) {
}
func sovPredicate(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
return (math_bits.Len64(x|1) + 6) / 7
}
func sozPredicate(x uint64) (n int) {
return sovPredicate(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@ -1356,6 +1254,7 @@ func (m *Predicate) Unmarshal(dAtA []byte) error {
func skipPredicate(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@ -1387,10 +1286,8 @@ func skipPredicate(dAtA []byte) (n int, err error) {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@ -1411,55 +1308,30 @@ func skipPredicate(dAtA []byte) (n int, err error) {
return 0, ErrInvalidLengthPredicate
}
iNdEx += length
if iNdEx < 0 {
return 0, ErrInvalidLengthPredicate
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPredicate
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipPredicate(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
if iNdEx < 0 {
return 0, ErrInvalidLengthPredicate
}
}
return iNdEx, nil
depth++
case 4:
return iNdEx, nil
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupPredicate
}
depth--
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthPredicate
}
if depth == 0 {
return iNdEx, nil
}
}
panic("unreachable")
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthPredicate = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPredicate = fmt.Errorf("proto: integer overflow")
ErrInvalidLengthPredicate = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPredicate = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupPredicate = fmt.Errorf("proto: unexpected end of group")
)

File diff suppressed because it is too large Load Diff

View File

@ -3,7 +3,6 @@ package influxdata.platform.storage;
option go_package = "datatypes";
import "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/any.proto";
import "predicate.proto";
@ -175,7 +174,61 @@ message TagValuesRequest {
string tag_key = 4;
}
// Response message for Storage.TagKeys and Storage.TagValues.
// Response message for Storage.TagKeys, Storage.TagValues Storage.MeasurementNames,
// Storage.MeasurementTagKeys and Storage.MeasurementTagValues.
message StringValuesResponse {
repeated bytes values = 1;
}
// MeasurementNamesRequest is the request message for Storage.MeasurementNames.
message MeasurementNamesRequest {
google.protobuf.Any source = 1;
TimestampRange range = 2 [(gogoproto.nullable) = false];
}
// MeasurementTagKeysRequest is the request message for Storage.MeasurementTagKeys.
message MeasurementTagKeysRequest {
google.protobuf.Any source = 1;
string measurement = 2;
TimestampRange range = 3 [(gogoproto.nullable) = false];
Predicate predicate = 4;
}
// MeasurementTagValuesRequest is the request message for Storage.MeasurementTagValues.
message MeasurementTagValuesRequest {
google.protobuf.Any source = 1;
string measurement = 2;
string tag_key = 3;
TimestampRange range = 4 [(gogoproto.nullable) = false];
Predicate predicate = 5;
}
// MeasurementFieldsRequest is the request message for Storage.MeasurementFields.
message MeasurementFieldsRequest {
google.protobuf.Any source = 1;
string measurement = 2;
TimestampRange range = 3 [(gogoproto.nullable) = false];
Predicate predicate = 4;
}
// MeasurementFieldsResponse is the response message for Storage.MeasurementFields.
message MeasurementFieldsResponse {
enum FieldType {
option (gogoproto.goproto_enum_prefix) = false;
FLOAT = 0 [(gogoproto.enumvalue_customname) = "FieldTypeFloat"];
INTEGER = 1 [(gogoproto.enumvalue_customname) = "FieldTypeInteger"];
UNSIGNED = 2 [(gogoproto.enumvalue_customname) = "FieldTypeUnsigned"];
STRING = 3 [(gogoproto.enumvalue_customname) = "FieldTypeString"];
BOOLEAN = 4 [(gogoproto.enumvalue_customname) = "FieldTypeBoolean"];
UNDEFINED = 5 [(gogoproto.enumvalue_customname) = "FieldTypeUndefined"];
}
message MessageField {
string key = 1;
FieldType type = 2;
sfixed64 timestamp = 3;
}
repeated MessageField fields = 1 [(gogoproto.nullable) = false];
}

View File

@ -0,0 +1,28 @@
// Code generated by "stringer -type FieldType"; DO NOT EDIT.
package cursors
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Float-0]
_ = x[Integer-1]
_ = x[Unsigned-2]
_ = x[String-3]
_ = x[Boolean-4]
_ = x[Undefined-5]
}
const _FieldType_name = "FloatIntegerUnsignedStringBooleanUndefined"
var _FieldType_index = [...]uint8{0, 5, 12, 20, 26, 33, 42}
func (i FieldType) String() string {
if i < 0 || i >= FieldType(len(_FieldType_index)-1) {
return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]]
}

View File

@ -1,3 +1,4 @@
package cursors
//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@arrayvalues.gen.go.tmpldata arrayvalues.gen.go.tmpl
//go:generate stringer -type FieldType

View File

@ -1,5 +1,7 @@
package cursors
import "github.com/influxdata/influxql"
// FieldType represents the primitive field data types available in tsm.
type FieldType int
@ -7,14 +9,82 @@ const (
Float FieldType = iota // means the data type is a float
Integer // means the data type is an integer
Unsigned // means the data type is an unsigned integer
Boolean // means the data type is a boolean
String // means the data type is a string of text
Boolean // means the data type is a boolean
Undefined // means the data type in unknown or undefined
)
var (
fieldTypeToDataTypeMapping = [8]influxql.DataType{
Float: influxql.Float,
Integer: influxql.Integer,
Unsigned: influxql.Unsigned,
String: influxql.String,
Boolean: influxql.Boolean,
Undefined: influxql.Unknown,
6: influxql.Unknown,
7: influxql.Unknown,
}
)
// FieldTypeToDataType returns the equivalent influxql DataType for the field type ft.
// If ft is an invalid FieldType, the results are undefined.
func FieldTypeToDataType(ft FieldType) influxql.DataType {
return fieldTypeToDataTypeMapping[ft&7]
}
// IsLower returns true if the other FieldType has greater precedence than the
// current value. Undefined has the lowest precedence.
func (ft FieldType) IsLower(other FieldType) bool { return other < ft }
type MeasurementField struct {
Key string
Type FieldType
Key string // Key is the name of the field
Type FieldType // Type is field type
Timestamp int64 // Timestamp refers to the maximum timestamp observed for the given field
}
// MeasurementFieldSlice implements sort.Interface and sorts
// the slice from lowest to highest precedence. Use sort.Reverse
// to sort from highest to lowest.
type MeasurementFieldSlice []MeasurementField
func (m MeasurementFieldSlice) Len() int {
return len(m)
}
func (m MeasurementFieldSlice) Less(i, j int) bool {
ii, jj := &m[i], &m[j]
return ii.Key < jj.Key ||
(ii.Key == jj.Key &&
(ii.Timestamp < jj.Timestamp ||
(ii.Timestamp == jj.Timestamp && ii.Type.IsLower(jj.Type))))
}
func (m MeasurementFieldSlice) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}
// UniqueByKey performs an in-place update of m, removing duplicate elements
// by Key, keeping the first occurrence of each. If the slice is not sorted,
// the behavior of UniqueByKey is undefined.
func (m *MeasurementFieldSlice) UniqueByKey() {
mm := *m
if len(mm) < 2 {
return
}
j := 0
for i := 1; i < len(mm); i++ {
if mm[j].Key != mm[i].Key {
j++
if j != i {
// optimization: skip copy if j == i
mm[j] = mm[i]
}
}
}
*m = mm[:j+1]
}
type MeasurementFields struct {

310
tsdb/cursors/schema_test.go Normal file
View File

@ -0,0 +1,310 @@
package cursors_test
import (
"math/rand"
"sort"
"testing"
"github.com/influxdata/influxdb/v2/pkg/testing/assert"
"github.com/influxdata/influxdb/v2/tsdb/cursors"
)
// Verifies FieldType precedence behavior is equivalent to influxql.DataType#LessThan
func TestFieldTypeDataTypePrecedenceEquivalence(t *testing.T) {
var fieldTypes = []cursors.FieldType{
cursors.Float,
cursors.Integer,
cursors.Unsigned,
cursors.Boolean,
cursors.String,
cursors.Undefined,
}
for _, fta := range fieldTypes {
for _, ftb := range fieldTypes {
if fta == ftb {
continue
}
got := fta.IsLower(ftb)
exp := cursors.FieldTypeToDataType(fta).LessThan(cursors.FieldTypeToDataType(ftb))
assert.Equal(t, got, exp, "failed %s.LessThan(%s)", fta.String(), ftb.String())
}
}
}
// Verifies sorting behavior of MeasurementFieldSlice
func TestMeasurementFieldSliceSort(t *testing.T) {
mfs := func(d ...cursors.MeasurementField) cursors.MeasurementFieldSlice {
return d
}
mf := func(key string, timestamp int64, ft cursors.FieldType) cursors.MeasurementField {
return cursors.MeasurementField{
Key: key,
Type: ft,
Timestamp: timestamp,
}
}
fltF := func(key string, ts int64) cursors.MeasurementField {
return mf(key, ts, cursors.Float)
}
intF := func(key string, ts int64) cursors.MeasurementField {
return mf(key, ts, cursors.Integer)
}
strF := func(key string, ts int64) cursors.MeasurementField {
return mf(key, ts, cursors.String)
}
blnF := func(key string, ts int64) cursors.MeasurementField {
return mf(key, ts, cursors.Boolean)
}
cases := []struct {
name string
in cursors.MeasurementFieldSlice
exp cursors.MeasurementFieldSlice
}{
{
name: "keys:diff types:same ts:same",
in: mfs(
fltF("bbb", 0),
fltF("aaa", 0),
fltF("ccc", 0),
),
exp: mfs(
fltF("aaa", 0),
fltF("bbb", 0),
fltF("ccc", 0),
),
},
{
name: "keys:same types:same ts:diff",
in: mfs(
fltF("aaa", 10),
fltF("ccc", 20),
fltF("aaa", 0),
fltF("ccc", 0),
),
exp: mfs(
fltF("aaa", 0),
fltF("aaa", 10),
fltF("ccc", 0),
fltF("ccc", 20),
),
},
{
name: "keys:same types:diff ts:same",
in: mfs(
strF("aaa", 0),
intF("aaa", 0),
fltF("aaa", 0),
blnF("aaa", 0),
),
exp: mfs(
blnF("aaa", 0),
strF("aaa", 0),
intF("aaa", 0),
fltF("aaa", 0),
),
},
{
name: "keys:same types:diff ts:diff",
in: mfs(
strF("aaa", 20),
intF("aaa", 10),
fltF("aaa", 0),
blnF("aaa", 30),
),
exp: mfs(
fltF("aaa", 0),
intF("aaa", 10),
strF("aaa", 20),
blnF("aaa", 30),
),
},
{
name: "keys:diff types:diff ts:diff",
in: mfs(
intF("ccc", 10),
blnF("fff", 30),
strF("aaa", 20),
fltF("ddd", 0),
),
exp: mfs(
strF("aaa", 20),
intF("ccc", 10),
fltF("ddd", 0),
blnF("fff", 30),
),
},
{
name: "keys:many types:many ts:same",
in: mfs(
intF("ccc", 10),
blnF("fff", 30),
strF("aaa", 20),
fltF("ddd", 0),
fltF("ccc", 10),
strF("fff", 30),
intF("aaa", 20),
blnF("ddd", 0),
),
exp: mfs(
strF("aaa", 20),
intF("aaa", 20),
intF("ccc", 10),
fltF("ccc", 10),
blnF("ddd", 0),
fltF("ddd", 0),
blnF("fff", 30),
strF("fff", 30),
),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := tc.in
// randomize order using fixed seed to
// ensure tests are deterministic on a given platform
rand.Seed(100)
for i := 0; i < 5; i++ {
rand.Shuffle(len(got), func(i, j int) {
got[i], got[j] = got[j], got[i]
})
sort.Sort(got)
assert.Equal(t, got, tc.exp, "failed at index", i)
}
})
}
}
func TestMeasurementFieldSlice_UniqueByKey(t *testing.T) {
mfs := func(d ...cursors.MeasurementField) cursors.MeasurementFieldSlice {
return d
}
mf := func(key string, timestamp int64, ft cursors.FieldType) cursors.MeasurementField {
return cursors.MeasurementField{
Key: key,
Type: ft,
Timestamp: timestamp,
}
}
fltF := func(key string, ts int64) cursors.MeasurementField {
return mf(key, ts, cursors.Float)
}
t.Run("multiple start end", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
fltF("aaa", 10),
fltF("bbb", 10),
fltF("ccc", 10),
fltF("ccc", 20),
)
exp := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
fltF("ccc", 10),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("multiple at end", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
fltF("ccc", 10),
fltF("ccc", 20),
fltF("ccc", 30),
)
exp := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
fltF("ccc", 10),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("no duplicates many", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
fltF("ccc", 20),
)
exp := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
fltF("ccc", 20),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("no duplicates two elements", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
)
exp := mfs(
fltF("aaa", 0),
fltF("bbb", 10),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("duplicates one key", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
fltF("aaa", 10),
fltF("aaa", 10),
fltF("aaa", 10),
fltF("aaa", 10),
fltF("aaa", 10),
)
exp := mfs(
fltF("aaa", 0),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("one element", func(t *testing.T) {
got := mfs(
fltF("aaa", 0),
)
exp := mfs(
fltF("aaa", 0),
)
got.UniqueByKey()
assert.Equal(t, got, exp)
})
t.Run("empty", func(t *testing.T) {
got := mfs()
exp := mfs()
got.UniqueByKey()
assert.Equal(t, got, exp)
})
}

75
tsdb/migrate/data_v1.go Normal file
View File

@ -0,0 +1,75 @@
package migrate
import (
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/v2/tsdb/migrate/internal"
)
//go:generate protoc --gogo_out=. internal/meta.proto
// Data represents the top level collection of all metadata.
type Data struct {
Term uint64 // associated raft term
Index uint64 // associated raft index
ClusterID uint64
Databases []DatabaseInfo
MaxShardGroupID uint64
MaxShardID uint64
}
// unmarshal deserializes from a protobuf representation.
func (data *Data) unmarshal(pb *internal.Data) {
data.Databases = make([]DatabaseInfo, len(pb.GetDatabases()))
for i, x := range pb.GetDatabases() {
data.Databases[i].unmarshal(x)
}
}
// UnmarshalBinary decodes the object from a binary format.
func (data *Data) UnmarshalBinary(buf []byte) error {
var pb internal.Data
if err := proto.Unmarshal(buf, &pb); err != nil {
return err
}
data.unmarshal(&pb)
return nil
}
// DatabaseInfo represents information about a database in the system.
type DatabaseInfo struct {
Name string
DefaultRetentionPolicy string
RetentionPolicies []RetentionPolicyInfo
}
// unmarshal deserializes from a protobuf representation.
func (di *DatabaseInfo) unmarshal(pb *internal.DatabaseInfo) {
di.Name = pb.GetName()
di.DefaultRetentionPolicy = pb.GetDefaultRetentionPolicy()
if len(pb.GetRetentionPolicies()) > 0 {
di.RetentionPolicies = make([]RetentionPolicyInfo, len(pb.GetRetentionPolicies()))
for i, x := range pb.GetRetentionPolicies() {
di.RetentionPolicies[i].unmarshal(x)
}
}
}
// RetentionPolicyInfo represents metadata about a retention policy.
type RetentionPolicyInfo struct {
Name string
ReplicaN int
Duration time.Duration
ShardGroupDuration time.Duration
}
// unmarshal deserializes from a protobuf representation.
func (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) {
rpi.Name = pb.GetName()
rpi.ReplicaN = int(pb.GetReplicaN())
rpi.Duration = time.Duration(pb.GetDuration())
rpi.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration())
}

File diff suppressed because it is too large Load Diff

View File

@ -45,6 +45,8 @@ const (
4 // Size in bytes of block
tsmKeyFieldSeparator1x = "#!~#" // tsm1 key field separator.
metaFile = "meta.db" // Default name of meta database
)
type Config struct {
@ -267,10 +269,24 @@ func (m *Migrator) createBucket(db, rp string) (influxdb.ID, error) {
return bucket.ID, nil
}
retName := ""
retDuration := time.Duration(0)
if rp != "" {
retentionPolicyInfo, err := m.getRetentionPolicy(db, rp)
if err != nil {
return 0, err
}
retName = retentionPolicyInfo.Name
retDuration = retentionPolicyInfo.Duration
}
if !m.DryRun {
bucket = &influxdb.Bucket{
Name: name,
OrgID: m.DestOrg,
OrgID: m.DestOrg,
Name: name,
RetentionPolicyName: retName,
RetentionPeriod: retDuration,
}
if err := m.metaSvc.CreateBucket(context.Background(), bucket); err != nil {
return 0, err
@ -283,6 +299,31 @@ func (m *Migrator) createBucket(db, rp string) (influxdb.ID, error) {
return bucket.ID, nil
}
// Load and extract retention policy from meta.db
func (m *Migrator) getRetentionPolicy(dbFilter, rpFilter string) (*RetentionPolicyInfo, error) {
file := filepath.Join(m.SourcePath, "meta/"+metaFile)
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
var cacheData = new(Data)
cacheData.UnmarshalBinary(data)
for _, database := range cacheData.Databases {
if database.Name == dbFilter {
for _, retPolicy := range database.RetentionPolicies {
if retPolicy.Name == rpFilter {
return &retPolicy, nil
}
}
}
}
return nil, errors.New("unable to find retention policy")
}
// Process1xShard migrates the TSM data in a single 1.x shard to the 2.x data directory.
//
// First, the shard is checked to determine it's fully compacted. Hot shards are

View File

@ -301,7 +301,7 @@ func (e *Engine) fieldsPredicate(ctx context.Context, orgID influxdb.ID, bucketI
vals := make([]cursors.MeasurementField, 0, len(tsmValues))
for key, val := range tsmValues {
vals = append(vals, cursors.MeasurementField{Key: key, Type: val.typ})
vals = append(vals, cursors.MeasurementField{Key: key, Type: val.typ, Timestamp: val.max})
}
return cursors.NewMeasurementFieldsSliceIteratorWithStats([]cursors.MeasurementFields{{Fields: vals}}, stats), nil
@ -403,7 +403,7 @@ func (e *Engine) fieldsNoPredicate(ctx context.Context, orgID influxdb.ID, bucke
vals := make([]cursors.MeasurementField, 0, len(tsmValues))
for key, val := range tsmValues {
vals = append(vals, cursors.MeasurementField{Key: key, Type: val.typ})
vals = append(vals, cursors.MeasurementField{Key: key, Type: val.typ, Timestamp: val.max})
}
return cursors.NewMeasurementFieldsSliceIteratorWithStats([]cursors.MeasurementFields{{Fields: vals}}, stats), nil

View File

@ -877,7 +877,7 @@ m10,foo=v barS="60" 501
min: 0,
max: 300,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String, Timestamp: 209}, {Key: "f", Type: cursors.Float, Timestamp: 201}},
expStats: makeStats(12),
},
{
@ -888,7 +888,7 @@ m10,foo=v barS="60" 501
min: 0,
max: 199,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer, Timestamp: 109}, {Key: "f", Type: cursors.Float, Timestamp: 109}},
expStats: makeStats(12),
},
{
@ -899,7 +899,7 @@ m10,foo=v barS="60" 501
min: 0,
max: 1000,
},
exp: []cursors.MeasurementField{{Key: "b", Type: cursors.Boolean}},
exp: []cursors.MeasurementField{{Key: "b", Type: cursors.Boolean, Timestamp: 201}},
expStats: makeStats(1),
},
{
@ -910,7 +910,7 @@ m10,foo=v barS="60" 501
min: 0,
max: 199,
},
exp: []cursors.MeasurementField{{Key: "barF", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "barF", Type: cursors.Float, Timestamp: 101}},
expStats: makeStats(1),
},
{
@ -921,7 +921,7 @@ m10,foo=v barS="60" 501
min: 200,
max: 299,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String, Timestamp: 209}, {Key: "f", Type: cursors.Float, Timestamp: 201}},
expStats: makeStats(6),
},
{
@ -932,7 +932,7 @@ m10,foo=v barS="60" 501
min: 109,
max: 109,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer, Timestamp: 109}, {Key: "f", Type: cursors.Float, Timestamp: 109}},
expStats: makeStats(6),
},
{
@ -943,7 +943,7 @@ m10,foo=v barS="60" 501
min: 201,
max: 201,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer, Timestamp: 202}, {Key: "f", Type: cursors.Float, Timestamp: 201}},
expStats: makeStats(6),
},
{
@ -954,7 +954,7 @@ m10,foo=v barS="60" 501
min: 202,
max: 202,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String, Timestamp: 209}},
expStats: makeStats(6),
},
{
@ -990,7 +990,7 @@ m10,foo=v barS="60" 501
max: 300,
expr: `tag10 = 'v10'`,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer}, {Key: "f", Type: cursors.Float}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.Integer, Timestamp: 202}, {Key: "f", Type: cursors.Float, Timestamp: 201}},
expStats: makeStats(3),
},
{
@ -1002,7 +1002,7 @@ m10,foo=v barS="60" 501
max: 300,
expr: `tag10 = 'v11'`,
},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String}},
exp: []cursors.MeasurementField{{Key: "i", Type: cursors.String, Timestamp: 209}},
expStats: makeStats(3),
},
@ -1017,7 +1017,7 @@ m10,foo=v barS="60" 501
min: 0,
max: 1000,
},
exp: []cursors.MeasurementField{{Key: "barF", Type: cursors.Float}, {Key: "barS", Type: cursors.String}},
exp: []cursors.MeasurementField{{Key: "barF", Type: cursors.Float, Timestamp: 101}, {Key: "barS", Type: cursors.String, Timestamp: 501}},
expStats: makeStats(1),
},

View File

@ -65,10 +65,7 @@ describe('Buckets', () => {
})
cy.get<Bucket>('@bucket').then(() => {
cy.getByTestID(`cf-resource-card--meta-item`).should(
'contain',
'7 days'
)
cy.getByTestID(`bucket-retention`).should('contain', '7 days')
})
})

View File

@ -120,6 +120,8 @@ describe('Checks', () => {
cy.getByTestID('popover--dialog')
.should('exist')
.contains('Last Run Status:')
// Need to trigger mouseout else the popover obscures the other buttons
.trigger('mouseout')
// create a label
cy.getByTestID('check-card').within(() => {

View File

@ -301,13 +301,7 @@ describe('Dashboard', () => {
cy.createDashWithViewAndVar(orgID).then(() => {
cy.fixture('routes').then(({orgs}) => {
cy.visit(`${orgs}/${orgID}/dashboards`)
cy.getByTestID('dashboard-card--name').within(() => {
// Ideally we don't need to select the clickable element within the card name.
// The testID here should be on the clickable element
// Issue created in Clockface: https://github.com/influxdata/clockface/issues/478
cy.get('.cf-resource-name--text').click()
})
cy.getByTestID('dashboard-card--name').click()
cy.get('.cell--view').should('have.length', 1)
})
})

View File

@ -153,8 +153,7 @@ describe('labels', () => {
.contains(newLabelDescription)
.should('be.visible')
cy.getByTestID('label-card')
.children('div.cf-resource-card--contents')
.children('div.cf-resource-card--row')
.children('div.cf-flex-box')
.children('div.cf-label')
.invoke('attr', 'style')
.should('contain', hex2BgColor(newLabelColor))
@ -197,8 +196,7 @@ describe('labels', () => {
.should('be.visible')
cy.getByTestID('label-card')
.children('div.cf-resource-card--contents')
.children('div.cf-resource-card--row')
.children('div.cf-flex-box')
.children('div.cf-label')
.invoke('attr', 'style')
.should('contain', hex2BgColor(oldLabelColor))
@ -242,8 +240,7 @@ describe('labels', () => {
.contains(newLabelDescription)
.should('be.visible')
cy.getByTestID('label-card')
.children('div.cf-resource-card--contents')
.children('div.cf-resource-card--row')
.children('div.cf-flex-box')
.children('div.cf-label')
.invoke('attr', 'style')
.should('contain', hex2BgColor(newLabelColor))

View File

@ -129,9 +129,9 @@
"webpack-merge": "^4.2.1"
},
"dependencies": {
"@influxdata/clockface": "2.0.3",
"@influxdata/clockface": "2.1.0",
"@influxdata/flux": "^0.4.0",
"@influxdata/flux-lsp-browser": "^0.4.2",
"@influxdata/flux-lsp-browser": "^0.5.1",
"@influxdata/flux-parser": "^0.3.0",
"@influxdata/giraffe": "0.18.0",
"@influxdata/influx": "0.5.5",

View File

@ -2,7 +2,6 @@
import React, {ReactElement, PureComponent} from 'react'
import {withRouter, WithRouterProps} from 'react-router'
import {connect} from 'react-redux'
import auth0js from 'auth0-js'
import {client} from 'src/utils/api'
@ -20,9 +19,6 @@ import {CLOUD, CLOUD_SIGNIN_PATHNAME} from 'src/shared/constants'
// Types
import {RemoteDataState} from 'src/types'
// Utils
import {getAuth0Config} from 'src/authorizations/apis'
interface State {
loading: RemoteDataState
}
@ -85,45 +81,9 @@ export class Signin extends PureComponent<Props, State> {
clearInterval(this.intervalID)
const config = await getAuth0Config()
if (CLOUD && config.socialSignUpOn) {
// The redirectUri must be the same as the url for the origin of the request
// otherwise there's a mismatch and Auth0 cannot validate the response
const redirectUri = window.location.href
// The responseType is arbitrary as it needs to be a non-empty, non "code" value:
// https://auth0.github.io/auth0.js/web-auth_index.js.html#line564
const responseType = 'token'
const auth0 = new auth0js.WebAuth({
domain: config.domain,
clientID: config.clientID,
redirectUri,
responseType,
})
// This is the result of JS & Auth0 weirdness
return new Promise((resolve, reject) => {
// The TLDR is that checkSession is not awaiting the callback to complete
// So checkSession can return a successful response and continue with the operation
// without the callback being completely finished
return auth0.checkSession({}, (error, webResponse) => {
if (error) {
reject(error)
}
resolve(webResponse)
})
})
.then(() => {
window.location.pathname = CLOUD_SIGNIN_PATHNAME
})
.catch(() => {
this.props.router.replace('/login')
})
}
// TODO: add returnTo to CLOUD signin
if (CLOUD) {
// TODO: add returnTo to CLOUD signin
window.location.pathname = CLOUD_SIGNIN_PATHNAME
throw error
}

View File

@ -30,16 +30,14 @@ const MatchingRuleCard: FC<Props> = ({rule, endpoints}) => {
const endpoint = endpoints.find(e => e.id === rule.endpointID)
return (
<ResourceCard
key={`rule-id--${rule.id}`}
testID="rule-card"
name={<ResourceCard.Name name={rule.name} />}
description={<ResourceCard.Description description={rule.description} />}
metaData={[
<>{`Checks every: ${rule.every}`}</>,
<>{`Sends notifications to: ${endpoint.name}`}</>,
]}
/>
<ResourceCard key={`rule-id--${rule.id}`} testID="rule-card">
<ResourceCard.Name name={rule.name} />
<ResourceCard.Description description={rule.description} />
<ResourceCard.Meta>
<>{`Checks every: ${rule.every}`}</>
<>{`Sends notifications to: ${endpoint.name}`}</>
</ResourceCard.Meta>
</ResourceCard>
)
}

View File

@ -136,20 +136,30 @@ const BucketCard: FC<Props & WithRouterProps & DispatchProps> = ({
</FeatureFlag>
</FlexBox>
)
const retention = <>Retention: {bucket.readableRetention}</>
const cardMetaItems =
bucket.type === 'user'
? [retention]
: [
<span
className="system-bucket"
key={`system-bucket-indicator-${bucket.id}`}
>
System Bucket
</span>,
retention,
]
let cardMeta = (
<ResourceCard.Meta>
<span data-testid="bucket-retention">
Retention: {_.capitalize(bucket.readableRetention)}
</span>
</ResourceCard.Meta>
)
if (bucket.type !== 'user') {
cardMeta = (
<ResourceCard.Meta>
<span
className="system-bucket"
key={`system-bucket-indicator-${bucket.id}`}
>
System Bucket
</span>
<span data-testid="bucket-retention">
Retention: {_.capitalize(bucket.readableRetention)}
</span>
</ResourceCard.Meta>
)
}
return (
<ResourceCard
@ -159,15 +169,13 @@ const BucketCard: FC<Props & WithRouterProps & DispatchProps> = ({
<BucketContextMenu bucket={bucket} onDeleteBucket={onDeleteBucket} />
)
}
name={
<ResourceCard.Name
testID={`bucket--card--name ${bucket.name}`}
onClick={handleNameClick}
name={bucket.name}
/>
}
metaData={cardMetaItems}
>
<ResourceCard.Name
testID={`bucket--card--name ${bucket.name}`}
onClick={handleNameClick}
name={bucket.name}
/>
{cardMeta}
{bucket.type === 'user' && actionButtons}
</ResourceCard>
)

View File

@ -69,23 +69,21 @@ const DemoDataBucketCard: FC<Props & WithRouterProps & DispatchProps> = ({
</FlexBox>
</Context>
}
name={
<ResourceCard.Name
testID={`bucket--card--name ${bucket.name}`}
onClick={handleNameClick}
name={bucket.name}
/>
}
metaData={[
>
<ResourceCard.Name
testID={`bucket--card--name ${bucket.name}`}
onClick={handleNameClick}
name={bucket.name}
/>
<ResourceCard.Meta>
<span
className="system-bucket"
key={`system-bucket-indicator-${bucket.name}`}
>
Demo Data Bucket
</span>,
<>Retention: {bucket.readableRetention}</>,
]}
>
</span>
<>Retention: {bucket.readableRetention}</>
</ResourceCard.Meta>
<Label
id="1"
key="1"

View File

@ -4,7 +4,15 @@ import {connect} from 'react-redux'
import {withRouter, WithRouterProps} from 'react-router'
// Components
import {SlideToggle, ComponentSize, ResourceCard} from '@influxdata/clockface'
import {
SlideToggle,
ComponentSize,
ResourceCard,
FlexBox,
FlexDirection,
AlignItems,
JustifyContent,
} from '@influxdata/clockface'
import CheckCardContext from 'src/checks/components/CheckCardContext'
import InlineLabels from 'src/shared/components/inlineLabels/InlineLabels'
import LastRunTaskStatus from 'src/shared/components/lastRunTaskStatus/LastRunTaskStatus'
@ -116,7 +124,42 @@ const CheckCard: FC<Props> = ({
<ResourceCard
key={`check-id--${id}`}
testID="check-card"
name={
disabled={activeStatus === 'inactive'}
direction={FlexDirection.Row}
alignItems={AlignItems.Center}
margin={ComponentSize.Large}
contextMenu={
<CheckCardContext
onView={onView}
onDelete={onDelete}
onClone={onClone}
/>
}
>
<FlexBox
direction={FlexDirection.Column}
justifyContent={JustifyContent.Center}
margin={ComponentSize.Medium}
alignItems={AlignItems.FlexStart}
>
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={onToggle}
testID="check-card--slide-toggle"
style={{flexBasis: '16px'}}
/>
<LastRunTaskStatus
key={2}
lastRunError={check.lastRunError}
lastRunStatus={check.lastRunStatus}
/>
</FlexBox>
<FlexBox
direction={FlexDirection.Column}
margin={ComponentSize.Small}
alignItems={AlignItems.FlexStart}
>
<ResourceCard.EditableName
onUpdate={onUpdateName}
onClick={onCheckClick}
@ -126,47 +169,22 @@ const CheckCard: FC<Props> = ({
buttonTestID="check-card--name-button"
inputTestID="check-card--input"
/>
}
toggle={
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={onToggle}
testID="check-card--slide-toggle"
/>
}
description={
<ResourceCard.EditableDescription
onUpdate={onUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
}
labels={
<ResourceCard.Meta>
<>Last completed at {check.latestCompleted}</>
<>{relativeTimestampFormatter(check.updatedAt, 'Last updated ')}</>
</ResourceCard.Meta>
<InlineLabels
selectedLabelIDs={check.labels}
onAddLabel={handleAddCheckLabel}
onRemoveLabel={handleRemoveCheckLabel}
/>
}
disabled={activeStatus === 'inactive'}
contextMenu={
<CheckCardContext
onView={onView}
onDelete={onDelete}
onClone={onClone}
/>
}
metaData={[
<>Last completed at {check.latestCompleted}</>,
<>{relativeTimestampFormatter(check.updatedAt, 'Last updated ')}</>,
<LastRunTaskStatus
key={2}
lastRunError={check.lastRunError}
lastRunStatus={check.lastRunStatus}
/>,
]}
/>
</FlexBox>
</ResourceCard>
)
}

View File

@ -3,6 +3,8 @@
------------------------------------------------------------------------------
*/
$dashboard-grid-gap: $cf-marg-a;
.dashboards-index__page-contents {
.cf-dapper-scrollbars--content {
height: 100% !important;
@ -20,8 +22,8 @@
display: grid;
grid-template-columns: minmax(100%, 1fr);
grid-auto-rows: 152px;
grid-column-gap: $cf-marg-a;
grid-row-gap: $cf-marg-a;
grid-column-gap: $dashboard-grid-gap;
grid-row-gap: $dashboard-grid-gap;
height: 100%;
.cf-resource-card {
@ -43,26 +45,30 @@
}
.cf-resource-description {
height: 60px;
flex: 1 0 30px;
margin-top: 22px;
overflow: hidden;
}
}
$dashboard-card-half: calc(50% - #{$dashboard-grid-gap});
$dashboard-card-third: calc(33.3333% - #{$dashboard-grid-gap});
$dashboard-card-quarter: calc(25% - #{$dashboard-grid-gap});
@media screen and (min-width: $cf-grid--breakpoint-sm) {
.dashboards-card-grid {
grid-template-columns: minmax(50%, 1fr) minmax(50%, 1fr);
grid-template-columns: minmax($dashboard-card-half, 1fr) minmax($dashboard-card-half, 1fr);
}
}
@media screen and (min-width: $cf-grid--breakpoint-md) {
.dashboards-card-grid {
grid-template-columns: minmax(33.3333%, 1fr) minmax(33.3333%, 1fr) minmax(33.3333%, 1fr);
grid-template-columns: minmax($dashboard-card-third, 1fr) minmax($dashboard-card-third, 1fr) minmax($dashboard-card-third, 1fr);
}
}
@media screen and (min-width: $cf-grid--breakpoint-lg) {
.dashboards-card-grid {
grid-template-columns: minmax(25%, 1fr) minmax(25%, 1fr) minmax(25%, 1fr) minmax(25%, 1fr);
grid-template-columns: minmax($dashboard-card-quarter, 1fr) minmax($dashboard-card-quarter, 1fr) minmax($dashboard-card-quarter, 1fr) minmax($dashboard-card-quarter, 1fr);
}
}

View File

@ -62,37 +62,32 @@ class DashboardCard extends PureComponent<Props> {
<ResourceCard
key={`dashboard-id--${id}`}
testID="dashboard-card"
name={
<ResourceCard.EditableName
onUpdate={this.handleUpdateDashboard}
onClick={this.handleClickDashboard}
name={name}
noNameString={DEFAULT_DASHBOARD_NAME}
testID="dashboard-card--name"
buttonTestID="dashboard-card--name-button"
inputTestID="dashboard-card--input"
/>
}
description={
<ResourceCard.EditableDescription
onUpdate={this.handleUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
}
labels={
<InlineLabels
selectedLabelIDs={labels}
onFilterChange={onFilterChange}
onAddLabel={this.handleAddLabel}
onRemoveLabel={this.handleRemoveLabel}
/>
}
metaData={[
<>{relativeTimestampFormatter(updatedAt, 'Last modified ')}</>,
]}
contextMenu={this.contextMenu}
/>
>
<ResourceCard.EditableName
onUpdate={this.handleUpdateDashboard}
onClick={this.handleClickDashboard}
name={name}
noNameString={DEFAULT_DASHBOARD_NAME}
testID="dashboard-card--name"
buttonTestID="dashboard-card--name-button"
inputTestID="dashboard-card--input"
/>
<ResourceCard.EditableDescription
onUpdate={this.handleUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
<ResourceCard.Meta>
{relativeTimestampFormatter(updatedAt, 'Last modified ')}
</ResourceCard.Meta>
<InlineLabels
selectedLabelIDs={labels}
onFilterChange={onFilterChange}
onAddLabel={this.handleAddLabel}
onRemoveLabel={this.handleRemoveLabel}
/>
</ResourceCard>
)
}

View File

@ -26,6 +26,19 @@ export function registerCompletion(monaco: MonacoType, server: LSPServer) {
},
})
monaco.languages.registerDocumentFormattingEditProvider(FLUXLANGID, {
provideDocumentFormattingEdits: async (model, _context, _token) => {
try {
const uri = model.uri.toString()
const edits = await server.formatting(uri)
return p2m.asTextEdits(edits)
} catch (e) {
return []
}
},
})
monaco.languages.registerFoldingRangeProvider(FLUXLANGID, {
provideFoldingRanges: async (model, _context, _token) => {
try {
@ -116,6 +129,6 @@ export function registerCompletion(monaco: MonacoType, server: LSPServer) {
})
return p2m.asCompletionResult(items, defaultRange)
},
triggerCharacters: ['.', ':', '(', ','],
triggerCharacters: ['.', ':', '(', ',', '"'],
})
}

View File

@ -105,6 +105,12 @@ export const references = (
})
}
// Build an LSP `textDocument/formatting` request for the document at `uri`.
// Returns the JSON-RPC request object produced by createRequest.
export const formatting = (id: number, uri: string) =>
  createRequest(id, 'textDocument/formatting', {textDocument: {uri}})
export const definition = (id: number, uri: string, position: Position) => {
return createRequest(id, 'textDocument/definition', {
textDocument: {uri},

View File

@ -14,11 +14,14 @@ import {
references,
definition,
symbols,
formatting,
} from 'src/external/monaco.flux.messages'
import {registerCompletion} from 'src/external/monaco.flux.lsp'
import {AppState, LocalStorage} from 'src/types'
import {getAllVariables, asAssignment} from 'src/variables/selectors'
import {buildVarsOption} from 'src/variables/utils/buildVarsOption'
import {runQuery} from 'src/shared/apis/query'
import {parseResponse as parse} from 'src/shared/parsing/flux/response'
import {store} from 'src/index'
@ -33,27 +36,52 @@ import {
WorkspaceEdit,
Location,
SymbolInformation,
TextEdit,
} from 'monaco-languageclient/lib/services'
import {Server} from '@influxdata/flux-lsp-browser'
type BucketCallback = () => Promise<string[]>
type MeasurementsCallback = (bucket: string) => Promise<string[]>
export interface WASMServer extends Server {
register_buckets_callback: (BucketCallback) => void
register_measurements_callback: (MeasurementsCallback) => void
}
import {format_from_js_file} from '@influxdata/flux'
// NOTE: parses table then select measurements from the _value column
// NOTE: parses the CSV table response, then selects the measurement names
// from the _value column (assumed to be column index 3 — confirm against
// the shape returned by parseResponse).
const parseMeasurementsResponse = response => {
  const tables = parse(response.csv) || [{data: [{}]}]
  const rows = tables.slice(1)
  return rows.map(row => row[3])
}
// Run a Flux query listing the measurements in `bucket` for org `orgID`.
// Returns the raw successful query result (callers read `.csv` from it).
// Throws when no org is provided or the query does not succeed.
const queryMeasurements = async (orgID, bucket) => {
  // BUG FIX: this is a module-level arrow function, so `this` is not an
  // instance — the guard must check the `orgID` parameter, not `this.orgID`.
  if (!orgID || orgID === '') {
    throw new Error('no org is provided')
  }
  const query = `import "influxdata/influxdb/v1"
v1.measurements(bucket:"${bucket}")`
  const raw = await runQuery(orgID, query).promise
  if (raw.type !== 'SUCCESS') {
    throw new Error('failed to get measurements')
  }
  // BUG FIX: the result was validated but never returned, so the caller
  // (parseMeasurementsResponse, which reads `response.csv`) always got
  // `undefined`.
  return raw
}
export class LSPServer {
private server: WASMServer
private messageID: number = 0
private buckets: string[] = []
private orgID: string = ''
private documentVersions: {[key: string]: number} = {}
public store: Store<AppState & LocalStorage>
constructor(server: WASMServer, reduxStore = store) {
this.server = server
this.server.register_buckets_callback(this.getBuckets)
this.server.register_measurements_callback(this.getMeasurements)
this.store = reduxStore
}
@ -61,10 +89,23 @@ export class LSPServer {
return Promise.resolve(this.buckets)
}
getMeasurements = async (bucket: string) => {
try {
const response = await queryMeasurements(this.orgID, bucket)
return parseMeasurementsResponse(response)
} catch (e) {
return []
}
}
updateBuckets(buckets: string[]) {
this.buckets = buckets
}
setOrg(orgID: string) {
this.orgID = orgID
}
initialize() {
return this.send(initialize(this.currentMessageID))
}
@ -119,6 +160,16 @@ export class LSPServer {
return response.result
}
async formatting(uri): Promise<TextEdit[]> {
await this.sendPrelude(uri)
const response = (await this.send(
formatting(this.currentMessageID, uri)
)) as {result: TextEdit[]}
return response.result
}
async completionItems(
uri: string,
position: Position,
@ -131,10 +182,7 @@ export class LSPServer {
completion(
this.currentMessageID,
uri,
{
...position,
line: position.line,
},
{...position, line: position.line},
context
)
)) as {result?: {items?: []}}

View File

@ -1,16 +1,7 @@
.label-card {
.cf-resource-card--contents {
flex-direction: row;
align-items: center;
}
.cf-resource-card--row {
margin-right: $cf-marg-c;
margin-bottom: 0;
}
}
.label-card--description {
font-weight: $cf-font-weight--medium;
margin: 0;
margin-left: $cf-marg-c;
}
.label-card--description__untitled {

View File

@ -7,6 +7,8 @@ import {
ComponentSize,
Label as LabelComponent,
ResourceCard,
FlexDirection,
AlignItems,
} from '@influxdata/clockface'
// Types
@ -39,20 +41,19 @@ export default class LabelCard extends PureComponent<Props> {
return (
<ResourceCard
className="label-card"
testID="label-card"
contextMenu={<LabelContextMenu label={label} onDelete={onDelete} />}
name={
<LabelComponent
id={label.id}
name={label.name}
color={label.properties.color}
description={label.properties.description}
size={ComponentSize.Small}
onClick={this.handleClick}
/>
}
direction={FlexDirection.Row}
alignItems={AlignItems.Center}
>
<LabelComponent
id={label.id}
name={label.name}
color={label.properties.color}
description={label.properties.description}
size={ComponentSize.Small}
onClick={this.handleClick}
/>
<p
className={descriptionClassName}
data-testid="label-card--description"

View File

@ -16,16 +16,15 @@ export default class MemberCard extends PureComponent<Props> {
const {member, onDelete} = this.props
return (
<>
<ResourceCard
testID="task-card"
contextMenu={
<MemberContextMenu member={member} onDelete={onDelete} />
}
name={<ResourceCard.Name name={member.name} />}
metaData={[<>Role: {member.role}</>]}
/>
</>
<ResourceCard
testID="task-card"
contextMenu={<MemberContextMenu member={member} onDelete={onDelete} />}
>
<ResourceCard.Name name={member.name} />
<ResourceCard.Meta>
<>Role: {member.role}</>
</ResourceCard.Meta>
</ResourceCard>
)
}
}

View File

@ -13,7 +13,14 @@ import {
} from 'src/notifications/endpoints/actions/thunks'
// Components
import {SlideToggle, ComponentSize, ResourceCard} from '@influxdata/clockface'
import {
SlideToggle,
ComponentSize,
ResourceCard,
FlexDirection,
AlignItems,
FlexBox,
} from '@influxdata/clockface'
import EndpointCardMenu from 'src/notifications/endpoints/components/EndpointCardMenu'
import InlineLabels from 'src/shared/components/inlineLabels/InlineLabels'
@ -68,33 +75,11 @@ const EndpointCard: FC<Props> = ({
router.push(`orgs/${orgID}/alerting/endpoints/${id}/edit`)
}
const nameComponent = (
<ResourceCard.EditableName
key={id}
name={name}
onClick={handleClick}
onUpdate={handleUpdateName}
testID={`endpoint-card--name ${name}`}
inputTestID="endpoint-card--input"
buttonTestID="endpoint-card--name-button"
noNameString="Name this notification endpoint"
/>
)
const handleToggle = () => {
const toStatus = activeStatus === 'active' ? 'inactive' : 'active'
onUpdateEndpointProperties(id, {status: toStatus})
}
const toggle = (
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={handleToggle}
testID="endpoint-card--slide-toggle"
/>
)
const handleView = () => {
const historyType: AlertHistoryType = 'notifications'
@ -126,39 +111,56 @@ const EndpointCard: FC<Props> = ({
onRemoveEndpointLabel(id, label.id)
}
const labelsComponent = (
<InlineLabels
selectedLabelIDs={endpoint.labels}
onAddLabel={handleAddEndpointLabel}
onRemoveLabel={handleRemoveEndpointLabel}
/>
)
const handleUpdateDescription = (description: string) => {
onUpdateEndpointProperties(id, {description})
}
const descriptionComponent = (
<ResourceCard.EditableDescription
onUpdate={handleUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
)
return (
<ResourceCard
key={id}
toggle={toggle}
name={nameComponent}
contextMenu={contextMenu}
description={descriptionComponent}
labels={labelsComponent}
disabled={activeStatus === 'inactive'}
metaData={[
<>{relativeTimestampFormatter(endpoint.updatedAt, 'Last updated ')}</>,
]}
direction={FlexDirection.Row}
alignItems={AlignItems.Center}
margin={ComponentSize.Large}
testID={`endpoint-card ${name}`}
/>
>
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={handleToggle}
testID="endpoint-card--slide-toggle"
/>
<FlexBox
direction={FlexDirection.Column}
alignItems={AlignItems.FlexStart}
margin={ComponentSize.Small}
>
<ResourceCard.EditableName
key={id}
name={name}
onClick={handleClick}
onUpdate={handleUpdateName}
testID={`endpoint-card--name ${name}`}
inputTestID="endpoint-card--input"
buttonTestID="endpoint-card--name-button"
noNameString="Name this notification endpoint"
/>
<ResourceCard.EditableDescription
onUpdate={handleUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
<ResourceCard.Meta>
<>{relativeTimestampFormatter(endpoint.updatedAt, 'Last updated ')}</>
</ResourceCard.Meta>
<InlineLabels
selectedLabelIDs={endpoint.labels}
onAddLabel={handleAddEndpointLabel}
onRemoveLabel={handleRemoveEndpointLabel}
/>
</FlexBox>
</ResourceCard>
)
}

View File

@ -4,7 +4,15 @@ import {connect} from 'react-redux'
import {withRouter, WithRouterProps} from 'react-router'
// Components
import {SlideToggle, ComponentSize, ResourceCard} from '@influxdata/clockface'
import {
SlideToggle,
ComponentSize,
ResourceCard,
FlexBox,
FlexDirection,
AlignItems,
JustifyContent,
} from '@influxdata/clockface'
import NotificationRuleCardContext from 'src/notifications/rules/components/RuleCardContext'
import InlineLabels from 'src/shared/components/inlineLabels/InlineLabels'
import LastRunTaskStatus from 'src/shared/components/lastRunTaskStatus/LastRunTaskStatus'
@ -114,7 +122,42 @@ const RuleCard: FC<Props> = ({
<ResourceCard
key={`rule-id--${id}`}
testID={`rule-card ${name}`}
name={
disabled={activeStatus === 'inactive'}
direction={FlexDirection.Row}
alignItems={AlignItems.Center}
margin={ComponentSize.Large}
contextMenu={
<NotificationRuleCardContext
onView={onView}
onClone={onClone}
onDelete={onDelete}
/>
}
>
<FlexBox
direction={FlexDirection.Column}
justifyContent={JustifyContent.Center}
margin={ComponentSize.Medium}
alignItems={AlignItems.FlexStart}
>
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={onToggle}
testID="rule-card--slide-toggle"
style={{flexBasis: '16px'}}
/>
<LastRunTaskStatus
key={2}
lastRunError={lastRunError}
lastRunStatus={lastRunStatus}
/>
</FlexBox>
<FlexBox
direction={FlexDirection.Column}
margin={ComponentSize.Small}
alignItems={AlignItems.FlexStart}
>
<ResourceCard.EditableName
onUpdate={onUpdateName}
onClick={onRuleClick}
@ -124,47 +167,22 @@ const RuleCard: FC<Props> = ({
buttonTestID="rule-card--name-button"
inputTestID="rule-card--input"
/>
}
toggle={
<SlideToggle
active={activeStatus === 'active'}
size={ComponentSize.ExtraSmall}
onChange={onToggle}
testID="rule-card--slide-toggle"
/>
}
description={
<ResourceCard.EditableDescription
onUpdate={onUpdateDescription}
description={description}
placeholder={`Describe ${name}`}
/>
}
labels={
<ResourceCard.Meta>
<>Last completed at {latestCompleted}</>
<>{relativeTimestampFormatter(rule.updatedAt, 'Last updated ')}</>
</ResourceCard.Meta>
<InlineLabels
selectedLabelIDs={rule.labels}
onAddLabel={handleAddRuleLabel}
onRemoveLabel={handleRemoveRuleLabel}
/>
}
disabled={activeStatus === 'inactive'}
contextMenu={
<NotificationRuleCardContext
onView={onView}
onClone={onClone}
onDelete={onDelete}
/>
}
metaData={[
<>Last completed at {latestCompleted}</>,
<>{relativeTimestampFormatter(rule.updatedAt, 'Last updated ')}</>,
<LastRunTaskStatus
key={2}
lastRunError={lastRunError}
lastRunStatus={lastRunStatus}
/>,
]}
/>
</FlexBox>
</ResourceCard>
)
}

View File

@ -89,6 +89,7 @@ class LoginPageContents extends PureComponent<DispatchProps> {
clientID: config.clientID,
redirectUri: config.redirectURL,
responseType: 'code',
state: config.state,
})
} catch (error) {
console.error(error)
@ -421,6 +422,7 @@ class LoginPageContents extends PureComponent<DispatchProps> {
}
private displayErrorMessage = (errors, auth0Err) => {
const {activeTab} = this.state
// eslint-disable-next-line
if (/error in email/.test(auth0Err.code)) {
this.setState({
@ -431,8 +433,13 @@ class LoginPageContents extends PureComponent<DispatchProps> {
auth0Err.code === 'access_denied' ||
auth0Err.code === 'user_exists'
) {
const emailError = `An account with that email address already exists. Try logging in instead.`
this.setState({...errors, emailError})
if (activeTab === ActiveTab.Login) {
const emailError = `The email and password combination you submitted don't match. Please try again`
this.setState({...errors, emailError})
} else {
const emailError = `An account with that email address already exists. Try logging in instead.`
this.setState({...errors, emailError})
}
} else {
const emailError = `We have been notified of an issue while accessing your account. If this issue persists, please contact support@influxdata.com`
this.setState({...errors, emailError})

View File

@ -20,21 +20,18 @@ export default class ScraperRow extends PureComponent<Props> {
public render() {
const {scraper} = this.props
return (
<>
<ResourceCard
name={
<ResourceCard.EditableName
onUpdate={this.handleUpdateScraperName}
name={scraper.name}
noNameString={DEFAULT_SCRAPER_NAME}
buttonTestID="editable-name"
inputTestID="input-field"
/>
}
metaData={[<>Bucket: {scraper.bucket}</>, <>URL: {scraper.url}</>]}
contextMenu={this.contextMenu}
<ResourceCard contextMenu={this.contextMenu}>
<ResourceCard.EditableName
onUpdate={this.handleUpdateScraperName}
name={scraper.name}
noNameString={DEFAULT_SCRAPER_NAME}
buttonTestID="editable-name"
inputTestID="input-field"
/>
</>
<ResourceCard.Meta>
{[<>Bucket: {scraper.bucket}</>, <>URL: {scraper.url}</>]}
</ResourceCard.Meta>
</ResourceCard>
)
}

View File

@ -60,8 +60,15 @@ class LoadDataNavigation extends PureComponent<Props> {
},
]
const activeTabName = tabs.find(t => t.id === activeTab).text
return (
<Tabs orientation={Orientation.Horizontal} size={ComponentSize.Large}>
<Tabs
orientation={Orientation.Horizontal}
size={ComponentSize.Large}
dropdownBreakpoint={872}
dropdownLabel={activeTabName}
>
{tabs.map(t => {
let tabElement = (
<Tabs.Tab

View File

@ -4,6 +4,7 @@ import {connect} from 'react-redux'
import {AppState, Bucket, ResourceType} from 'src/types'
import {getAll} from 'src/resources/selectors'
import {getOrg} from 'src/organizations/selectors'
import loadServer from 'src/external/monaco.flux.server'
@ -13,9 +14,11 @@ const FluxBucketProvider: FC<{}> = () => {
const mstp = (state: AppState): {} => {
const buckets = getAll<Bucket>(state, ResourceType.Buckets)
const org = getOrg(state)
loadServer().then(server => {
server.updateBuckets(buckets.map(b => b.name))
server.setOrg(org.id || '')
})
return {}

View File

@ -10,7 +10,6 @@
align-content: center;
background-color: rgba($g5-pepper, 0.5);
border-radius: 50%;
margin-top: $ix-marg-a;
transition: color 0.25s ease, text-shadow 0.25s ease;
&.last-run-task-status__danger {

View File

@ -126,3 +126,9 @@
// External
@import '../../node_modules/@influxdata/react-custom-scrollbars/dist/styles.css';
// TODO: delete this later when it's addressed in Clockface
.cf-resource-card {
margin-bottom: $cf-border;
}

View File

@ -11,6 +11,8 @@ import {
IconFont,
InputLabel,
FlexBox,
AlignItems,
FlexDirection,
} from '@influxdata/clockface'
import {Context} from 'src/clockface'
import InlineLabels from 'src/shared/components/inlineLabels/InlineLabels'
@ -56,9 +58,20 @@ export class TaskCard extends PureComponent<Props & WithRouterProps> {
<ResourceCard
testID="task-card"
disabled={!this.isTaskActive}
labels={this.labels}
contextMenu={this.contextMenu}
name={
alignItems={AlignItems.Center}
margin={ComponentSize.Large}
direction={FlexDirection.Row}
>
<LastRunTaskStatus
lastRunError={task.lastRunError}
lastRunStatus={task.lastRunStatus}
/>
<FlexBox
alignItems={AlignItems.FlexStart}
direction={FlexDirection.Column}
margin={ComponentSize.Medium}
>
<ResourceCard.EditableName
onClick={this.handleNameClick}
onUpdate={this.handleRenameTask}
@ -68,19 +81,14 @@ export class TaskCard extends PureComponent<Props & WithRouterProps> {
buttonTestID="task-card--name-button"
inputTestID="task-card--input"
/>
}
metaData={[
this.activeToggle,
<>Last completed at {task.latestCompleted}</>,
<>{`Scheduled to run ${this.schedule}`}</>,
]}
toggle={
<LastRunTaskStatus
lastRunError={task.lastRunError}
lastRunStatus={task.lastRunStatus}
/>
}
/>
<ResourceCard.Meta>
{this.activeToggle}
<>Last completed at {task.latestCompleted}</>
<>{`Scheduled to run ${this.schedule}`}</>
</ResourceCard.Meta>
{this.labels}
</FlexBox>
</ResourceCard>
)
}

View File

@ -50,43 +50,38 @@ class CollectorRow extends PureComponent<Props & WithRouterProps> {
<ResourceCard
key={`telegraf-id--${collector.id}`}
testID="resource-card"
name={
<ResourceCard.EditableName
onUpdate={this.handleUpdateName}
onClick={this.handleNameClick}
name={collector.name}
noNameString={DEFAULT_COLLECTOR_NAME}
testID="collector-card--name"
buttonTestID="collector-card--name-button"
inputTestID="collector-card--input"
/>
}
description={
<ResourceCard.EditableDescription
onUpdate={this.handleUpdateDescription}
description={collector.description}
placeholder={`Describe ${collector.name}`}
/>
}
labels={this.labels}
metaData={[
contextMenu={this.contextMenu}
>
<ResourceCard.EditableName
onUpdate={this.handleUpdateName}
onClick={this.handleNameClick}
name={collector.name}
noNameString={DEFAULT_COLLECTOR_NAME}
testID="collector-card--name"
buttonTestID="collector-card--name-button"
inputTestID="collector-card--input"
/>
<ResourceCard.EditableDescription
onUpdate={this.handleUpdateDescription}
description={collector.description}
placeholder={`Describe ${collector.name}`}
/>
<ResourceCard.Meta>
<span key={`bucket-key--${collector.id}`} data-testid="bucket-name">
{/* todo(glinton): verify what sets this. It seems like it is using the 'config' section of 'influxdb_v2' output?? */}
Bucket: {collector.metadata.buckets.join(', ')}
</span>,
<>
<Link
to={`/orgs/${org.id}/load-data/telegrafs/${
collector.id
}/instructions`}
data-testid="setup-instructions-link"
>
Setup Instructions
</Link>
</>,
]}
contextMenu={this.contextMenu}
/>
</span>
<Link
to={`/orgs/${org.id}/load-data/telegrafs/${
collector.id
}/instructions`}
data-testid="setup-instructions-link"
>
Setup Instructions
</Link>
</ResourceCard.Meta>
{this.labels}
</ResourceCard>
)
}

View File

@ -1,6 +1,6 @@
// Libraries
import React, {PureComponent, MouseEvent} from 'react'
import {get} from 'lodash'
import {get, capitalize} from 'lodash'
import {connect} from 'react-redux'
import {withRouter, WithRouterProps} from 'react-router'
import {
@ -46,19 +46,17 @@ class StaticTemplateCard extends PureComponent<Props & WithRouterProps> {
const {template} = this.props
return (
<ResourceCard
testID="template-card"
contextMenu={this.contextMenu}
description={this.description}
name={
<ResourceCard.Name
onClick={this.handleNameClick}
name={template.meta.name}
testID="template-card--name"
/>
}
metaData={[this.templateType]}
/>
<ResourceCard testID="template-card" contextMenu={this.contextMenu}>
<ResourceCard.Name
onClick={this.handleNameClick}
name={template.meta.name}
testID="template-card--name"
/>
{this.description}
<ResourceCard.Meta>
{capitalize(get(template, 'content.data.type', ''))}
</ResourceCard.Meta>
</ResourceCard>
)
}
@ -88,16 +86,6 @@ class StaticTemplateCard extends PureComponent<Props & WithRouterProps> {
)
}
private get templateType(): JSX.Element {
const {template} = this.props
return (
<div className="resource-list--meta-item">
{get(template, 'content.data.type')}
</div>
)
}
private handleCreate = () => {
const {onCreateFromTemplate, name} = this.props

View File

@ -1,7 +1,7 @@
// Libraries
import React, {PureComponent, MouseEvent} from 'react'
import {connect} from 'react-redux'
import {get} from 'lodash'
import {get, capitalize} from 'lodash'
import {withRouter, WithRouterProps} from 'react-router'
import {
Button,
@ -61,31 +61,27 @@ class TemplateCard extends PureComponent<Props & WithRouterProps> {
const {template, onFilterChange} = this.props
return (
<ResourceCard
testID="template-card"
contextMenu={this.contextMenu}
name={
<ResourceCard.EditableName
onClick={this.handleNameClick}
onUpdate={this.handleUpdateTemplateName}
name={template.meta.name}
noNameString={DEFAULT_TEMPLATE_NAME}
testID="template-card--name"
buttonTestID="template-card--name-button"
inputTestID="template-card--input"
/>
}
description={this.description}
labels={
<InlineLabels
selectedLabelIDs={template.labels}
onFilterChange={onFilterChange}
onAddLabel={this.handleAddLabel}
onRemoveLabel={this.handleRemoveLabel}
/>
}
metaData={[this.templateType]}
/>
<ResourceCard testID="template-card" contextMenu={this.contextMenu}>
<ResourceCard.EditableName
onClick={this.handleNameClick}
onUpdate={this.handleUpdateTemplateName}
name={template.meta.name}
noNameString={DEFAULT_TEMPLATE_NAME}
testID="template-card--name"
buttonTestID="template-card--name-button"
inputTestID="template-card--input"
/>
{this.description}
<ResourceCard.Meta>
{capitalize(get(template, 'content.data.type', ''))}
</ResourceCard.Meta>
<InlineLabels
selectedLabelIDs={template.labels}
onFilterChange={onFilterChange}
onAddLabel={this.handleAddLabel}
onRemoveLabel={this.handleRemoveLabel}
/>
</ResourceCard>
)
}
@ -121,16 +117,6 @@ class TemplateCard extends PureComponent<Props & WithRouterProps> {
)
}
private get templateType(): JSX.Element {
const {template} = this.props
return (
<div className="resource-list--meta-item">
{get(template, 'meta.type', '')}
</div>
)
}
private get contextMenu(): JSX.Element {
const {
template: {id},

View File

@ -11,4 +11,5 @@ export type Auth0Config = {
domain: string
redirectURL: string
socialSignUpOn: boolean
state: string
}

View File

@ -38,7 +38,6 @@ class VariableCard extends PureComponent<Props & WithRouterProps> {
return (
<ResourceCard
testID="resource-card"
labels={this.labels}
contextMenu={
<VariableContextMenu
variable={variable}
@ -47,14 +46,16 @@ class VariableCard extends PureComponent<Props & WithRouterProps> {
onDelete={onDeleteVariable}
/>
}
name={
<ResourceCard.Name
onClick={this.handleNameClick}
name={variable.name}
/>
}
metaData={[<>Type: {variable.arguments.type}</>]}
/>
>
<ResourceCard.Name
onClick={this.handleNameClick}
name={variable.name}
/>
<ResourceCard.Meta>
<>Type: {variable.arguments.type}</>
</ResourceCard.Meta>
{this.labels}
</ResourceCard>
)
}

View File

@ -4,6 +4,9 @@ const merge = require('webpack-merge')
const common = require('./webpack.common.ts')
const PORT = parseInt(process.env.PORT, 10) || 8080
const PUBLIC = process.env.PUBLIC || undefined
const {
BASE_PATH,
} = require('./src/utils/env')
const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin;
const webpack = require('webpack')
@ -19,7 +22,9 @@ module.exports = merge(common, {
},
devServer: {
hot: true,
historyApiFallback: true,
historyApiFallback: {
index: `${BASE_PATH}/index.html`
},
compress: true,
proxy: {
'/api/v2': 'http://localhost:9999',
@ -30,6 +35,8 @@ module.exports = merge(common, {
host: '0.0.0.0',
port: PORT,
public: PUBLIC,
publicPath: PUBLIC,
sockPath: `${BASE_PATH}hmr`
},
plugins: [
new webpack.DllReferencePlugin({

View File

@ -1018,15 +1018,15 @@
debug "^3.1.0"
lodash.once "^4.1.1"
"@influxdata/clockface@2.0.3":
version "2.0.3"
resolved "https://registry.yarnpkg.com/@influxdata/clockface/-/clockface-2.0.3.tgz#a524bfe57898506f64cb735d5a457677c3ee2d32"
integrity sha512-wXJ30Bm0LC2HBSfwEQOsAznqrBqQYgn4SVb30Ycm6dghe0JOh+S2pWDD0/jWw8deN8M2pShKtz4FjW0NPyXFXg==
"@influxdata/clockface@2.1.0":
version "2.1.0"
resolved "https://registry.yarnpkg.com/@influxdata/clockface/-/clockface-2.1.0.tgz#786100fffc9794213ce68eb5fd994505c8c980ad"
integrity sha512-ks7wtiNeJWB6LeFFw/aAOHc3sulmjDR/Ys0+IeLHRDrybdmlt3PZJJEIVYpNNZq5t7jSvO/y2oOBW7BPc1rKjg==
"@influxdata/flux-lsp-browser@^0.4.2":
version "0.4.2"
resolved "https://registry.yarnpkg.com/@influxdata/flux-lsp-browser/-/flux-lsp-browser-0.4.2.tgz#b1126004e6d80e0426792475c4ec330dce6a3b60"
integrity sha512-ecvcRC7teOldZl+trgCmArnS3nVtUNcG5GQzwfglOj3ubJMxCJtqksqfCYSwQwAcfwWRFCUkCHccyh/qLYXjdA==
"@influxdata/flux-lsp-browser@^0.5.1":
version "0.5.1"
resolved "https://registry.yarnpkg.com/@influxdata/flux-lsp-browser/-/flux-lsp-browser-0.5.1.tgz#cc8470fb69ff31439db8465731485d4979a84666"
integrity sha512-YUAad5ap0Y9llsEt0Nqe/IPhbO9SjrkDQ+L9EsSDwjVCzi84EYxk5XXtS2ZwuzBIl33Z4qdacMwjwI9aAYQZkg==
"@influxdata/flux-parser@^0.3.0":
version "0.3.0"