Merge pull request #17777 from influxdata/chore/merge-master
chore: merge master into algow

commit 8e098b51c9
@@ -4,6 +4,7 @@ import (
 	"context"

 	"github.com/influxdata/influxdb/v2"
+	icontext "github.com/influxdata/influxdb/v2/context"
 )

 var _ influxdb.OrganizationService = (*OrgService)(nil)

@@ -43,6 +44,18 @@ func (s *OrgService) FindOrganization(ctx context.Context, filter influxdb.Organ

 // FindOrganizations retrieves all organizations that match the provided filter and then filters the list down to only the resources that are authorized.
 func (s *OrgService) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {
+	if filter.Name == nil && filter.ID == nil && filter.UserID == nil {
+		// if the user doesnt have permission to look up all orgs we need to add this users id to the filter to save lookup time
+		auth, err := icontext.GetAuthorizer(ctx)
+		if err != nil {
+			return nil, 0, err
+		}
+		if _, _, err := AuthorizeReadGlobal(ctx, influxdb.OrgsResourceType); err != nil {
+			userid := auth.GetUserID()
+			filter.UserID = &userid
+		}
+	}
+
 	// TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data
 	// will likely be expensive.
 	os, _, err := s.s.FindOrganizations(ctx, filter, opt...)
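The change above can be read as a small authorization pattern: when an unscoped listing is requested and the caller lacks global read on orgs, the filter is narrowed to the caller's own user ID before hitting the store. A minimal standalone sketch of that pattern, assuming the same influxdb, authorizer, and context packages used in this diff (scopeOrgFilter is a hypothetical helper, not part of this PR):

package example

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	icontext "github.com/influxdata/influxdb/v2/context"
)

// scopeOrgFilter narrows an unscoped OrganizationFilter to the calling user
// when the request context is not authorized to read all organizations.
// Hypothetical helper that mirrors the logic added to FindOrganizations above.
func scopeOrgFilter(ctx context.Context, filter *influxdb.OrganizationFilter) error {
	if filter.Name != nil || filter.ID != nil || filter.UserID != nil {
		return nil // already scoped by another predicate
	}
	auth, err := icontext.GetAuthorizer(ctx)
	if err != nil {
		return err
	}
	// Global read denied: restrict the listing to orgs the caller belongs to.
	if _, _, err := authorizer.AuthorizeReadGlobal(ctx, influxdb.OrgsResourceType); err != nil {
		userID := auth.GetUserID()
		filter.UserID = &userID
	}
	return nil
}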
@@ -647,16 +647,19 @@ func AddLabels(t *testing.T) {
 	})

 	t.Run("add same twice", func(t *testing.T) {
-		// TODO(affo)
-		t.Skip("see https://github.com/influxdata/influxdb/issues/17092")
-
 		serverFn, clientFn, fx := setup(t)
-		server := serverFn(fx.auth(influxdb.WriteAction))
+		server := serverFn(func() influxdb.Authorizer {
+			a := fx.auth(influxdb.WriteAction)
+			// The LabelService uses a "standard auth" mode.
+			// That's why we need to add further permissions and the org ones are not enough.
+			fx.addLabelPermission(a, influxdb.WriteAction, fx.Labels[0].ID)
+			return a
+		}())
 		defer server.Close()
 		client := clientFn(server.URL)
 		if _, err := client.AddDocumentLabel(context.Background(), namespace, fx.Document.ID, fx.Labels[0].ID); err != nil {
-			if !strings.Contains(err.Error(), "???") {
-				t.Errorf("unexpected error: %v", err)
+			if !strings.Contains(err.Error(), influxdb.ErrLabelExistsOnResource.Msg) {
+				t.Errorf("unexpected error: %v", err.Error())
 			}
 		} else {
 			t.Error("expected error got none")
@@ -257,6 +257,15 @@ func (h *OrgHandler) handleGetOrgs(w http.ResponseWriter, r *http.Request) {
 		filter.ID = id
 	}

+	if userID := qp.Get("userID"); userID != "" {
+		id, err := influxdb.IDFromString(userID)
+		if err != nil {
+			h.API.Err(w, err)
+			return
+		}
+		filter.UserID = id
+	}
+
 	orgs, _, err := h.OrgSVC.FindOrganizations(r.Context(), filter)
 	if err != nil {
 		h.API.Err(w, err)
@@ -3751,6 +3751,11 @@ paths:
         schema:
           type: string
         description: Filter organizations to a specific organization ID.
+      - in: query
+        name: userID
+        schema:
+          type: string
+        description: Filter organizations to a specific user ID.
       responses:
         '200':
           description: A list of organizations
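Together with the handler change above, this makes user-scoped listing available over the REST API. A hedged sketch of calling it from Go with only the standard library; the host, token, and user ID are placeholders, and only part of the response schema is decoded:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Placeholders: adjust the host, token, and user ID for a real deployment.
	base := "http://localhost:9999/api/v2/orgs"
	userID := "00xx0x0x00x0x000"

	u, _ := url.Parse(base)
	q := u.Query()
	q.Set("userID", userID) // new query parameter documented above
	u.RawQuery = q.Encode()

	req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
	req.Header.Set("Authorization", "Token my-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode just the org IDs and names; the full schema carries more fields.
	var body struct {
		Orgs []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"orgs"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	for _, o := range body.Orgs {
		fmt.Println(o.ID, o.Name)
	}
}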
@@ -7574,8 +7579,8 @@ components:
           items:
             type: object
             properties:
-              remove:
-                type: boolean
+              stateStatus:
+                type: string
               id:
                 type: string
               pkgName:

@@ -7765,8 +7770,8 @@ components:
           items:
             type: object
             properties:
-              remove:
-                type: boolean
+              stateStatus:
+                type: string
               id:
                 type: string
               pkgName:

@@ -7810,8 +7815,8 @@ components:
           items:
             type: object
             properties:
-              remove:
-                type: boolean
+              stateStatus:
+                type: string
               id:
                 type: string
               pkgName:
kv/label.go (11 changes)

@@ -209,6 +209,17 @@ func (s *Service) createLabelMapping(ctx context.Context, tx Tx, m *influxdb.Lab
 		return err
 	}

+	ls := []*influxdb.Label{}
+	err := s.findResourceLabels(ctx, tx, influxdb.LabelMappingFilter{ResourceID: m.ResourceID, ResourceType: m.ResourceType}, &ls)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(ls); i++ {
+		if ls[i].ID == m.LabelID {
+			return influxdb.ErrLabelExistsOnResource
+		}
+	}
+
 	if err := s.putLabelMapping(ctx, tx, m); err != nil {
 		return err
 	}
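With this guard in place, attaching a label that is already on a resource surfaces a conflict instead of silently writing a duplicate mapping. A rough usage sketch against any influxdb.LabelService backed by this kv store; the helper and its error handling are illustrative, not part of the PR:

package example

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/influxdata/influxdb/v2"
)

// addLabelTwice attaches the same label mapping twice and verifies that the
// second attempt is rejected with ErrLabelExistsOnResource.
func addLabelTwice(ctx context.Context, svc influxdb.LabelService, m influxdb.LabelMapping) error {
	if err := svc.CreateLabelMapping(ctx, &m); err != nil {
		return fmt.Errorf("first mapping should succeed: %w", err)
	}
	err := svc.CreateLabelMapping(ctx, &m)
	if err == nil {
		return errors.New("expected a conflict on the duplicate mapping")
	}
	// Matching on the message mirrors the updated "add same twice" test above.
	if !strings.Contains(err.Error(), influxdb.ErrLabelExistsOnResource.Msg) {
		return fmt.Errorf("unexpected error: %w", err)
	}
	return nil
}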
kv/org.go (23 changes)

@@ -236,6 +236,29 @@ func (s *Service) FindOrganizations(ctx context.Context, filter influxdb.Organiz
 	}

 	os := []*influxdb.Organization{}

+	if filter.UserID != nil {
+		// find urms for orgs with this user
+		urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{
+			UserID:       *filter.UserID,
+			ResourceType: influxdb.OrgsResourceType,
+		}, opt...)
+
+		if err != nil {
+			return nil, 0, err
+		}
+		// find orgs by the urm's resource ids.
+		for _, urm := range urms {
+			o, err := s.FindOrganizationByID(ctx, urm.ResourceID)
+			if err == nil {
+				// if there is an error then this is a crufty urm and we should just move on
+				os = append(os, o)
+			}
+		}
+
+		return os, len(os), nil
+	}
+
 	filterFn := filterOrganizationsFn(filter)
 	err := s.kv.View(ctx, func(tx Tx) error {
 		return forEachOrganization(ctx, tx, func(o *influxdb.Organization) bool {
label.go (7 changes)

@@ -25,6 +25,13 @@ var (
 		Code: EInvalid,
 		Msg:  "label name is empty",
 	}

+	// ErrLabelExistsOnResource is used when attempting to add a label to a resource
+	// when that label already exists on the resource
+	ErrLabelExistsOnResource = &Error{
+		Code: EConflict,
+		Msg:  "Cannot add label, label already exists on resource",
+	}
 )

 // LabelService represents a service for managing resource labels
@@ -71,8 +71,9 @@ var ErrInvalidOrgFilter = &Error{

 // OrganizationFilter represents a set of filter that restrict the returned results.
 type OrganizationFilter struct {
-	Name *string
-	ID   *ID
+	Name   *string
+	ID     *ID
+	UserID *ID
 }

 func ErrInternalOrgServiceError(op string, err error) *Error {
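A hedged sketch of how a caller can use the new field; any influxdb.OrganizationService implementation touched in this PR (the authorizer, kv, and tenant variants) accepts it:

package example

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
)

// orgsForUser lists the organizations a given user belongs to by passing the
// new UserID filter straight to the service. Illustrative helper only.
func orgsForUser(ctx context.Context, svc influxdb.OrganizationService, userID influxdb.ID) ([]*influxdb.Organization, error) {
	orgs, n, err := svc.FindOrganizations(ctx, influxdb.OrganizationFilter{UserID: &userID})
	if err != nil {
		return nil, err
	}
	fmt.Printf("found %d orgs for user %s\n", n, userID)
	return orgs, nil
}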
@@ -7,6 +7,7 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"net/url"
 	"testing"
 )

@@ -79,6 +80,14 @@ func (r *Req) Do(handler http.Handler) *Resp {
 	}
 }

+func (r *Req) SetFormValue(k, v string) *Req {
+	if r.req.Form == nil {
+		r.req.Form = make(url.Values)
+	}
+	r.req.Form.Set(k, v)
+	return r
+}
+
 // Headers allows the user to set headers on the http request.
 func (r *Req) Headers(k, v string, rest ...string) *Req {
 	headers := append(rest, k, v)
@@ -45,11 +45,33 @@ func TestHTTP(t *testing.T) {
 	})

 	t.Run("Post", func(t *testing.T) {
-		testttp.
-			Post(t, "/", nil).
-			Do(svr).
-			ExpectStatus(http.StatusCreated).
-			ExpectBody(assertBody(t, http.MethodPost))
+		t.Run("basic", func(t *testing.T) {
+			testttp.
+				Post(t, "/", nil).
+				Do(svr).
+				ExpectStatus(http.StatusCreated).
+				ExpectBody(assertBody(t, http.MethodPost))
+		})
+
+		t.Run("with form values", func(t *testing.T) {
+			svr := http.NewServeMux()
+			svr.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				r.ParseForm()
+				w.WriteHeader(http.StatusOK)
+				w.Write([]byte(r.FormValue("key")))
+			}))
+
+			testttp.
+				Post(t, "/", nil).
+				SetFormValue("key", "val").
+				Do(svr).
+				ExpectStatus(http.StatusOK).
+				ExpectBody(func(body *bytes.Buffer) {
+					if expected, got := "val", body.String(); expected != got {
+						t.Fatalf("did not get form value; expected=%q got=%q", expected, got)
+					}
+				})
+		})
 	})

 	t.Run("PostJSON", func(t *testing.T) {
pkger/models.go (1026 changes): file diff suppressed because it is too large.
@@ -476,11 +476,6 @@ func TestPkg(t *testing.T) {
 			kind      Kind
 			validName string
 		}{
-			{
-				pkgFile:   "testdata/dashboard.yml",
-				kind:      KindDashboard,
-				validName: "dash_1",
-			},
 			{
 				pkgFile: "testdata/label.yml",
 				kind:    KindLabel,

@@ -491,16 +486,6 @@ func TestPkg(t *testing.T) {
 				kind:      KindNotificationRule,
 				validName: "rule_UUID",
 			},
-			{
-				pkgFile:   "testdata/tasks.yml",
-				kind:      KindTask,
-				validName: "task_UUID",
-			},
-			{
-				pkgFile:   "testdata/telegraf.yml",
-				kind:      KindTelegraf,
-				validName: "first_tele_config",
-			},
 		}

 		for _, tt := range tests {
@@ -409,11 +409,6 @@ func (p *Pkg) addObjectForRemoval(k Kind, pkgName string, id influxdb.ID) {
 	}

 	switch k {
-	case KindDashboard:
-		p.mDashboards[pkgName] = &dashboard{
-			identity: newIdentity,
-			id:       id,
-		}
 	case KindLabel:
 		p.mLabels[pkgName] = &label{
 			identity: newIdentity,

@@ -424,26 +419,11 @@ func (p *Pkg) addObjectForRemoval(k Kind, pkgName string, id influxdb.ID) {
 			identity: newIdentity,
 			id:       id,
 		}
-	case KindTask:
-		p.mTasks[pkgName] = &task{
-			identity: newIdentity,
-			id:       id,
-		}
-	case KindTelegraf:
-		p.mTelegrafs[pkgName] = &telegraf{
-			identity: newIdentity,
-			config:   influxdb.TelegrafConfig{ID: id},
-		}
 	}
 }

 func (p *Pkg) getObjectIDSetter(k Kind, pkgName string) (func(influxdb.ID), bool) {
 	switch k {
-	case KindDashboard:
-		d, ok := p.mDashboards[pkgName]
-		return func(id influxdb.ID) {
-			d.id = id
-		}, ok
 	case KindLabel:
 		l, ok := p.mLabels[pkgName]
 		return func(id influxdb.ID) {

@@ -454,16 +434,6 @@ func (p *Pkg) getObjectIDSetter(k Kind, pkgName string) (func(influxdb.ID), bool
 		return func(id influxdb.ID) {
 			r.id = id
 		}, ok
-	case KindTask:
-		t, ok := p.mTasks[pkgName]
-		return func(id influxdb.ID) {
-			t.id = id
-		}, ok
-	case KindTelegraf:
-		t, ok := p.mTelegrafs[pkgName]
-		return func(id influxdb.ID) {
-			t.config.ID = id
-		}, ok
 	default:
 		return nil, false
 	}

@@ -578,7 +548,7 @@ func (p *Pkg) dashboards() []*dashboard {
 	for _, d := range p.mDashboards {
 		dashes = append(dashes, d)
 	}
-	sort.Slice(dashes, func(i, j int) bool { return dashes[i].Name() < dashes[j].Name() })
+	sort.Slice(dashes, func(i, j int) bool { return dashes[i].PkgName() < dashes[j].PkgName() })
 	return dashes
 }
File diff suppressed because it is too large.
@@ -1662,9 +1662,10 @@ spec:
 	t.Run("happy path", func(t *testing.T) {
 		testfileRunner(t, "testdata/dashboard", func(t *testing.T, pkg *Pkg) {
 			sum := pkg.Summary()
-			require.Len(t, sum.Dashboards, 1)
+			require.Len(t, sum.Dashboards, 2)

 			actual := sum.Dashboards[0]
 			assert.Equal(t, "dash_1", actual.PkgName)
 			assert.Equal(t, "display name", actual.Name)
 			assert.Equal(t, "desc1", actual.Description)

@@ -1699,6 +1700,11 @@ spec:
 			assert.Equal(t, "text", c.Type)
 			assert.Equal(t, "#8F8AF4", c.Hex)
 			assert.Equal(t, 3.0, c.Value)
+
+			actual2 := sum.Dashboards[1]
+			assert.Equal(t, "dash_2", actual2.PkgName)
+			assert.Equal(t, "dash_2", actual2.Name)
+			assert.Equal(t, "desc", actual2.Description)
 		})
 	})
@@ -2600,18 +2606,32 @@ spec:
 			actual := sum.Dashboards[0]
 			assert.Equal(t, "dash_1", actual.Name)

-			require.Len(t, actual.LabelAssociations, 1)
-			actualLabel := actual.LabelAssociations[0]
-			assert.Equal(t, "label_1", actualLabel.Name)
+			require.Len(t, actual.LabelAssociations, 2)
+			assert.Equal(t, "label_1", actual.LabelAssociations[0].Name)
+			assert.Equal(t, "label_2", actual.LabelAssociations[1].Name)

-			assert.Contains(t, sum.LabelMappings, SummaryLabelMapping{
-				Status:          StateStatusNew,
-				ResourceType:    influxdb.DashboardsResourceType,
-				ResourcePkgName: "dash_1",
-				ResourceName:    "dash_1",
-				LabelPkgName:    "label_1",
-				LabelName:       "label_1",
-			})
+			expectedMappings := []SummaryLabelMapping{
+				{
+					Status:          StateStatusNew,
+					ResourceType:    influxdb.DashboardsResourceType,
+					ResourcePkgName: "dash_1",
+					ResourceName:    "dash_1",
+					LabelPkgName:    "label_1",
+					LabelName:       "label_1",
+				},
+				{
+					Status:          StateStatusNew,
+					ResourceType:    influxdb.DashboardsResourceType,
+					ResourcePkgName: "dash_1",
+					ResourceName:    "dash_1",
+					LabelPkgName:    "label_2",
+					LabelName:       "label_2",
+				},
+			}
+
+			for _, expectedMapping := range expectedMappings {
+				assert.Contains(t, sum.LabelMappings, expectedMapping)
+			}
 		})
 	})
@@ -3543,10 +3563,11 @@ spec:
 			assert.Equal(t, "display name", actual.TelegrafConfig.Name)
 			assert.Equal(t, "desc", actual.TelegrafConfig.Description)

-			require.Len(t, actual.LabelAssociations, 1)
+			require.Len(t, actual.LabelAssociations, 2)
+			assert.Equal(t, "label_1", actual.LabelAssociations[0].Name)
+			assert.Equal(t, "label_2", actual.LabelAssociations[1].Name)

-			require.Len(t, sum.LabelMappings, 1)
+			require.Len(t, sum.LabelMappings, 2)
 			expectedMapping := SummaryLabelMapping{
 				Status:          StateStatusNew,
 				ResourcePkgName: "first_tele_config",

@@ -3556,6 +3577,9 @@ spec:
 				ResourceType: influxdb.TelegrafsResourceType,
 			}
 			assert.Equal(t, expectedMapping, sum.LabelMappings[0])
+			expectedMapping.LabelPkgName = "label_2"
+			expectedMapping.LabelName = "label_2"
+			assert.Equal(t, expectedMapping, sum.LabelMappings[1])
 		})
 	})
pkger/service.go (134 changes)

@@ -686,10 +686,6 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts
 	}

 	var diff Diff
-	diff.Dashboards = s.dryRunDashboards(pkg)
-	diff.Tasks = s.dryRunTasks(pkg)
-	diff.Telegrafs = s.dryRunTelegraf(pkg)
-
 	diffRules, err := s.dryRunNotificationRules(ctx, orgID, pkg)
 	if err != nil {
 		return Summary{}, Diff{}, nil, err

@@ -712,8 +708,11 @@ func (s *Service) dryRun(ctx context.Context, orgID influxdb.ID, pkg *Pkg, opts

 	diff.Buckets = stateDiff.Buckets
 	diff.Checks = stateDiff.Checks
+	diff.Dashboards = stateDiff.Dashboards
 	diff.NotificationEndpoints = stateDiff.NotificationEndpoints
 	diff.Labels = stateDiff.Labels
+	diff.Tasks = stateDiff.Tasks
+	diff.Telegrafs = stateDiff.Telegrafs
 	diff.Variables = stateDiff.Variables
 	diff.LabelMappings = append(stateDiff.LabelMappings, diff.LabelMappings...)
@@ -757,16 +756,6 @@ func (s *Service) dryRunChecks(ctx context.Context, orgID influxdb.ID, checks ma
 	}
 }

-func (s *Service) dryRunDashboards(pkg *Pkg) []DiffDashboard {
-	dashs := pkg.dashboards()
-
-	diffs := make([]DiffDashboard, 0, len(dashs))
-	for _, d := range dashs {
-		diffs = append(diffs, newDiffDashboard(d))
-	}
-	return diffs
-}
-
 func (s *Service) dryRunLabels(ctx context.Context, orgID influxdb.ID, labels map[string]*stateLabel) {
 	for _, pkgLabel := range labels {
 		pkgLabel.orgID = orgID

@@ -897,23 +886,6 @@ func (s *Service) dryRunSecrets(ctx context.Context, orgID influxdb.ID, pkg *Pkg
 	return nil
 }

-func (s *Service) dryRunTasks(pkg *Pkg) []DiffTask {
-	var diffs []DiffTask
-	for _, t := range pkg.tasks() {
-		diffs = append(diffs, newDiffTask(t))
-	}
-	return diffs
-}
-
-func (s *Service) dryRunTelegraf(pkg *Pkg) []DiffTelegraf {
-	telegrafs := pkg.telegrafs()
-	diffs := make([]DiffTelegraf, 0, len(telegrafs))
-	for _, t := range telegrafs {
-		diffs = append(diffs, newDiffTelegraf(t))
-	}
-	return diffs
-}
-
 func (s *Service) dryRunVariables(ctx context.Context, orgID influxdb.ID, vars map[string]*stateVariable) {
 	existingVars, _ := s.getAllPlatformVariables(ctx, orgID)
@@ -958,10 +930,7 @@ type (

 func (s *Service) dryRunLabelMappings(ctx context.Context, pkg *Pkg, state *stateCoordinator) ([]DiffLabelMapping, error) {
 	mappers := []labelMappers{
-		mapperDashboards(pkg.dashboards()),
 		mapperNotificationRules(pkg.notificationRules()),
-		mapperTasks(pkg.tasks()),
-		mapperTelegrafs(pkg.telegrafs()),
 	}

 	diffs := make([]DiffLabelMapping, 0)
@@ -1087,6 +1056,17 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
 		mappings = append(mappings, mm...)
 	}

+	for _, d := range state.mDashboards {
+		if IsRemoval(d.stateStatus) {
+			continue
+		}
+		mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, d)
+		if err != nil {
+			return nil, err
+		}
+		mappings = append(mappings, mm...)
+	}
+
 	for _, e := range state.mEndpoints {
 		if IsRemoval(e.stateStatus) {
 			continue
@@ -1098,6 +1078,28 @@ func (s *Service) dryRunLabelMappingsV2(ctx context.Context, state *stateCoordin
 		mappings = append(mappings, mm...)
 	}

+	for _, t := range state.mTasks {
+		if IsRemoval(t.stateStatus) {
+			continue
+		}
+		mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, t)
+		if err != nil {
+			return nil, err
+		}
+		mappings = append(mappings, mm...)
+	}
+
+	for _, t := range state.mTelegrafs {
+		if IsRemoval(t.stateStatus) {
+			continue
+		}
+		mm, err := s.dryRunResourceLabelMappingV2(ctx, state, stateLabelsByResName, t)
+		if err != nil {
+			return nil, err
+		}
+		mappings = append(mappings, mm...)
+	}
+
 	for _, v := range state.mVariables {
 		if IsRemoval(v.stateStatus) {
 			continue
@@ -1183,6 +1185,7 @@ func (s *Service) addStackState(ctx context.Context, stackID influxdb.ID, pkg *P
 		KindCheck,
 		KindLabel,
 		KindNotificationEndpoint,
+		KindTask,
 		KindVariable,
 	}
@@ -1319,10 +1322,10 @@ func (s *Service) apply(ctx context.Context, coordinator *rollbackCoordinator, o
 			s.applyVariables(ctx, state.variables()),
 			s.applyBuckets(ctx, state.buckets()),
 			s.applyChecks(ctx, state.checks()),
-			s.applyDashboards(pkg.dashboards()),
+			s.applyDashboards(state.dashboards()),
 			s.applyNotificationEndpoints(ctx, userID, state.endpoints()),
-			s.applyTasks(pkg.tasks()),
-			s.applyTelegrafs(pkg.telegrafs()),
+			s.applyTasks(state.tasks()),
+			s.applyTelegrafs(state.telegrafConfigs()),
 		},
 	}
@@ -1579,23 +1582,23 @@ func (s *Service) applyCheck(ctx context.Context, c *stateCheck, userID influxdb
 	}
 }

-func (s *Service) applyDashboards(dashboards []*dashboard) applier {
+func (s *Service) applyDashboards(dashboards []*stateDashboard) applier {
 	const resource = "dashboard"

 	mutex := new(doMutex)
-	rollbackDashboards := make([]*dashboard, 0, len(dashboards))
+	rollbackDashboards := make([]*stateDashboard, 0, len(dashboards))

 	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
-		var d dashboard
+		var d *stateDashboard
 		mutex.Do(func() {
-			dashboards[i].OrgID = orgID
-			d = *dashboards[i]
+			dashboards[i].orgID = orgID
+			d = dashboards[i]
 		})

 		influxBucket, err := s.applyDashboard(ctx, d)
 		if err != nil {
 			return &applyErrBody{
-				name: d.Name(),
+				name: d.parserDash.Name(),
 				msg:  err.Error(),
 			}
 		}
@@ -1623,12 +1626,12 @@ func (s *Service) applyDashboards(dashboards []*dashboard) applier {
 	}
 }

-func (s *Service) applyDashboard(ctx context.Context, d dashboard) (influxdb.Dashboard, error) {
-	cells := convertChartsToCells(d.Charts)
+func (s *Service) applyDashboard(ctx context.Context, d *stateDashboard) (influxdb.Dashboard, error) {
+	cells := convertChartsToCells(d.parserDash.Charts)
 	influxDashboard := influxdb.Dashboard{
-		OrganizationID: d.OrgID,
-		Description:    d.Description,
-		Name:           d.Name(),
+		OrganizationID: d.orgID,
+		Description:    d.parserDash.Description,
+		Name:           d.parserDash.Name(),
 		Cells:          cells,
 	}
 	err := s.dashSVC.CreateDashboard(ctx, &influxDashboard)
@@ -2057,34 +2060,37 @@ func (s *Service) applySecrets(secrets map[string]string) applier {
 	}
 }

-func (s *Service) applyTasks(tasks []*task) applier {
+func (s *Service) applyTasks(tasks []*stateTask) applier {
 	const resource = "tasks"

 	mutex := new(doMutex)
-	rollbackTasks := make([]task, 0, len(tasks))
+	rollbackTasks := make([]*stateTask, 0, len(tasks))

 	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
-		var t task
+		var t *stateTask
 		mutex.Do(func() {
 			tasks[i].orgID = orgID
-			t = *tasks[i]
+			t = tasks[i]
 		})

 		newTask, err := s.taskSVC.CreateTask(ctx, influxdb.TaskCreate{
 			Type:           influxdb.TaskSystemType,
-			Flux:           t.flux(),
+			Flux:           t.parserTask.flux(),
 			OwnerID:        userID,
-			Description:    t.description,
-			Status:         string(t.Status()),
+			Description:    t.parserTask.description,
+			Status:         string(t.parserTask.Status()),
 			OrganizationID: t.orgID,
 		})
 		if err != nil {
-			return &applyErrBody{name: t.Name(), msg: err.Error()}
+			return &applyErrBody{
+				name: t.parserTask.Name(),
+				msg:  err.Error(),
+			}
 		}

 		mutex.Do(func() {
 			tasks[i].id = newTask.ID
-			rollbackTasks = append(rollbackTasks, *tasks[i])
+			rollbackTasks = append(rollbackTasks, tasks[i])
 		})

 		return nil
@@ -2106,16 +2112,16 @@ func (s *Service) applyTasks(tasks []*task) applier {
 	}
 }

-func (s *Service) applyTelegrafs(teles []*telegraf) applier {
+func (s *Service) applyTelegrafs(teles []*stateTelegraf) applier {
 	const resource = "telegrafs"

 	mutex := new(doMutex)
-	rollbackTelegrafs := make([]*telegraf, 0, len(teles))
+	rollbackTelegrafs := make([]*stateTelegraf, 0, len(teles))

 	createFn := func(ctx context.Context, i int, orgID, userID influxdb.ID) *applyErrBody {
 		var cfg influxdb.TelegrafConfig
 		mutex.Do(func() {
-			teles[i].config.OrgID = orgID
+			teles[i].orgID = orgID
 			cfg = teles[i].summarize().TelegrafConfig
 		})
@@ -2128,7 +2134,7 @@ func (s *Service) applyTelegrafs(teles []*telegraf) applier {
 		}

 		mutex.Do(func() {
-			teles[i].config = cfg
+			teles[i].id = cfg.ID
 			rollbackTelegrafs = append(rollbackTelegrafs, teles[i])
 		})
@@ -2608,8 +2614,11 @@ func newSummaryFromStatePkg(pkg *Pkg, state *stateCoordinator) Summary {
 	pkgSum := pkg.Summary()
 	pkgSum.Buckets = stateSum.Buckets
 	pkgSum.Checks = stateSum.Checks
+	pkgSum.Dashboards = stateSum.Dashboards
 	pkgSum.NotificationEndpoints = stateSum.NotificationEndpoints
 	pkgSum.Labels = stateSum.Labels
+	pkgSum.Tasks = stateSum.Tasks
+	pkgSum.TelegrafConfigs = stateSum.TelegrafConfigs
 	pkgSum.Variables = stateSum.Variables

 	// filter out label mappings that are from pgk and replace with those

@@ -2617,7 +2626,10 @@ func newSummaryFromStatePkg(pkg *Pkg, state *stateCoordinator) Summary {
 	resourcesToSkip := map[influxdb.ResourceType]bool{
 		influxdb.BucketsResourceType:              true,
 		influxdb.ChecksResourceType:               true,
+		influxdb.DashboardsResourceType:           true,
 		influxdb.NotificationEndpointResourceType: true,
+		influxdb.TasksResourceType:                true,
+		influxdb.TelegrafsResourceType:            true,
 		influxdb.VariablesResourceType:            true,
 	}
 	for _, lm := range pkgSum.LabelMappings {
@@ -8,22 +8,28 @@ import (
 )

 type stateCoordinator struct {
-	mBuckets   map[string]*stateBucket
-	mChecks    map[string]*stateCheck
-	mEndpoints map[string]*stateEndpoint
-	mLabels    map[string]*stateLabel
-	mVariables map[string]*stateVariable
+	mBuckets    map[string]*stateBucket
+	mChecks     map[string]*stateCheck
+	mDashboards map[string]*stateDashboard
+	mEndpoints  map[string]*stateEndpoint
+	mLabels     map[string]*stateLabel
+	mTasks      map[string]*stateTask
+	mTelegrafs  map[string]*stateTelegraf
+	mVariables  map[string]*stateVariable

 	labelMappings []stateLabelMapping
 }

 func newStateCoordinator(pkg *Pkg) *stateCoordinator {
 	state := stateCoordinator{
-		mBuckets:   make(map[string]*stateBucket),
-		mChecks:    make(map[string]*stateCheck),
-		mEndpoints: make(map[string]*stateEndpoint),
-		mLabels:    make(map[string]*stateLabel),
-		mVariables: make(map[string]*stateVariable),
+		mBuckets:    make(map[string]*stateBucket),
+		mChecks:     make(map[string]*stateCheck),
+		mDashboards: make(map[string]*stateDashboard),
+		mEndpoints:  make(map[string]*stateEndpoint),
+		mLabels:     make(map[string]*stateLabel),
+		mTasks:      make(map[string]*stateTask),
+		mTelegrafs:  make(map[string]*stateTelegraf),
+		mVariables:  make(map[string]*stateVariable),
 	}

 	for _, pkgBkt := range pkg.buckets() {

@@ -38,6 +44,12 @@ func newStateCoordinator(pkg *Pkg) *stateCoordinator {
 			stateStatus: StateStatusNew,
 		}
 	}
+	for _, pkgDash := range pkg.dashboards() {
+		state.mDashboards[pkgDash.PkgName()] = &stateDashboard{
+			parserDash:  pkgDash,
+			stateStatus: StateStatusNew,
+		}
+	}
 	for _, pkgEndpoint := range pkg.notificationEndpoints() {
 		state.mEndpoints[pkgEndpoint.PkgName()] = &stateEndpoint{
 			parserEndpoint: pkgEndpoint,

@@ -50,6 +62,18 @@ func newStateCoordinator(pkg *Pkg) *stateCoordinator {
 			stateStatus: StateStatusNew,
 		}
 	}
+	for _, pkgTask := range pkg.tasks() {
+		state.mTasks[pkgTask.PkgName()] = &stateTask{
+			parserTask:  pkgTask,
+			stateStatus: StateStatusNew,
+		}
+	}
+	for _, pkgTele := range pkg.telegrafs() {
+		state.mTelegrafs[pkgTele.PkgName()] = &stateTelegraf{
+			parserTelegraf: pkgTele,
+			stateStatus:    StateStatusNew,
+		}
+	}
 	for _, pkgVar := range pkg.variables() {
 		state.mVariables[pkgVar.PkgName()] = &stateVariable{
 			parserVar: pkgVar,

@@ -76,6 +100,14 @@ func (s *stateCoordinator) checks() []*stateCheck {
 	return out
 }

+func (s *stateCoordinator) dashboards() []*stateDashboard {
+	out := make([]*stateDashboard, 0, len(s.mDashboards))
+	for _, d := range s.mDashboards {
+		out = append(out, d)
+	}
+	return out
+}
+
 func (s *stateCoordinator) endpoints() []*stateEndpoint {
 	out := make([]*stateEndpoint, 0, len(s.mEndpoints))
 	for _, e := range s.mEndpoints {
@@ -92,6 +124,22 @@ func (s *stateCoordinator) labels() []*stateLabel {
 	return out
 }

+func (s *stateCoordinator) tasks() []*stateTask {
+	out := make([]*stateTask, 0, len(s.mTasks))
+	for _, t := range s.mTasks {
+		out = append(out, t)
+	}
+	return out
+}
+
+func (s *stateCoordinator) telegrafConfigs() []*stateTelegraf {
+	out := make([]*stateTelegraf, 0, len(s.mTelegrafs))
+	for _, t := range s.mTelegrafs {
+		out = append(out, t)
+	}
+	return out
+}
+
 func (s *stateCoordinator) variables() []*stateVariable {
 	out := make([]*stateVariable, 0, len(s.mVariables))
 	for _, v := range s.mVariables {
@@ -116,6 +164,13 @@ func (s *stateCoordinator) diff() Diff {
 		return diff.Checks[i].PkgName < diff.Checks[j].PkgName
 	})

+	for _, d := range s.mDashboards {
+		diff.Dashboards = append(diff.Dashboards, d.diffDashboard())
+	}
+	sort.Slice(diff.Dashboards, func(i, j int) bool {
+		return diff.Dashboards[i].PkgName < diff.Dashboards[j].PkgName
+	})
+
 	for _, e := range s.mEndpoints {
 		diff.NotificationEndpoints = append(diff.NotificationEndpoints, e.diffEndpoint())
 	}

@@ -130,6 +185,20 @@ func (s *stateCoordinator) diff() Diff {
 		return diff.Labels[i].PkgName < diff.Labels[j].PkgName
 	})

+	for _, t := range s.mTasks {
+		diff.Tasks = append(diff.Tasks, t.diffTask())
+	}
+	sort.Slice(diff.Tasks, func(i, j int) bool {
+		return diff.Tasks[i].PkgName < diff.Tasks[j].PkgName
+	})
+
+	for _, t := range s.mTelegrafs {
+		diff.Telegrafs = append(diff.Telegrafs, t.diffTelegraf())
+	}
+	sort.Slice(diff.Telegrafs, func(i, j int) bool {
+		return diff.Telegrafs[i].PkgName < diff.Telegrafs[j].PkgName
+	})
+
 	for _, v := range s.mVariables {
 		diff.Variables = append(diff.Variables, v.diffVariable())
 	}
@@ -182,12 +251,25 @@ func (s *stateCoordinator) summary() Summary {
 		return sum.Checks[i].PkgName < sum.Checks[j].PkgName
 	})

+	for _, d := range s.mDashboards {
+		if IsRemoval(d.stateStatus) {
+			continue
+		}
+		sum.Dashboards = append(sum.Dashboards, d.summarize())
+	}
+	sort.Slice(sum.Dashboards, func(i, j int) bool {
+		return sum.Dashboards[i].PkgName < sum.Dashboards[j].PkgName
+	})
+
 	for _, e := range s.mEndpoints {
 		if IsRemoval(e.stateStatus) {
 			continue
 		}
 		sum.NotificationEndpoints = append(sum.NotificationEndpoints, e.summarize())
 	}
 	sort.Slice(sum.NotificationEndpoints, func(i, j int) bool {
 		return sum.NotificationEndpoints[i].PkgName < sum.NotificationEndpoints[j].PkgName
 	})

 	for _, v := range s.mLabels {
 		if IsRemoval(v.stateStatus) {

@@ -199,6 +281,26 @@ func (s *stateCoordinator) summary() Summary {
 		return sum.Labels[i].PkgName < sum.Labels[j].PkgName
 	})

+	for _, t := range s.mTasks {
+		if IsRemoval(t.stateStatus) {
+			continue
+		}
+		sum.Tasks = append(sum.Tasks, t.summarize())
+	}
+	sort.Slice(sum.Tasks, func(i, j int) bool {
+		return sum.Tasks[i].PkgName < sum.Tasks[j].PkgName
+	})
+
+	for _, t := range s.mTelegrafs {
+		if IsRemoval(t.stateStatus) {
+			continue
+		}
+		sum.TelegrafConfigs = append(sum.TelegrafConfigs, t.summarize())
+	}
+	sort.Slice(sum.TelegrafConfigs, func(i, j int) bool {
+		return sum.TelegrafConfigs[i].PkgName < sum.TelegrafConfigs[j].PkgName
+	})
+
 	for _, v := range s.mVariables {
 		if IsRemoval(v.stateStatus) {
 			continue
@@ -281,6 +383,18 @@ func (s *stateCoordinator) addObjectForRemoval(k Kind, pkgName string, id influx
 			parserEndpoint: &notificationEndpoint{identity: newIdentity},
 			stateStatus:    StateStatusRemove,
 		}
+	case KindTask:
+		s.mTasks[pkgName] = &stateTask{
+			id:          id,
+			parserTask:  &task{identity: newIdentity},
+			stateStatus: StateStatusRemove,
+		}
+	case KindTelegraf:
+		s.mTelegrafs[pkgName] = &stateTelegraf{
+			id:             id,
+			parserTelegraf: &telegraf{identity: newIdentity},
+			stateStatus:    StateStatusRemove,
+		}
 	case KindVariable:
 		s.mVariables[pkgName] = &stateVariable{
 			id: id,

@@ -319,6 +433,18 @@ func (s *stateCoordinator) getObjectIDSetter(k Kind, pkgName string) (func(influ
 			r.id = id
 			r.stateStatus = StateStatusExists
 		}, ok
+	case KindTask:
+		r, ok := s.mTasks[pkgName]
+		return func(id influxdb.ID) {
+			r.id = id
+			r.stateStatus = StateStatusExists
+		}, ok
+	case KindTelegraf:
+		r, ok := s.mTelegrafs[pkgName]
+		return func(id influxdb.ID) {
+			r.id = id
+			r.stateStatus = StateStatusExists
+		}, ok
 	case KindVariable:
 		r, ok := s.mVariables[pkgName]
 		return func(id influxdb.ID) {
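The getObjectIDSetter additions above follow a simple closure pattern: return a setter bound to the map entry so callers can assign an ID later without knowing the concrete state type. A stripped-down illustration of that idea in plain Go, independent of pkger's real types:

package example

import "fmt"

type item struct {
	id     int
	exists bool
}

// idSetter returns a closure bound to the map entry for name, mirroring the
// getObjectIDSetter shape above; the boolean reports whether the entry exists.
func idSetter(m map[string]*item, name string) (func(int), bool) {
	it, ok := m[name]
	if !ok {
		return nil, false
	}
	return func(id int) {
		it.id = id
		it.exists = true // analogous to flipping stateStatus to the "exists" state
	}, ok
}

func main() {
	m := map[string]*item{"dash_1": {}}
	if set, ok := idSetter(m, "dash_1"); ok {
		set(42)
	}
	fmt.Println(m["dash_1"].id, m["dash_1"].exists) // 42 true
}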
@@ -479,6 +605,99 @@ func (c *stateCheck) summarize() SummaryCheck {
 	return sum
 }

+type stateDashboard struct {
+	id, orgID   influxdb.ID
+	stateStatus StateStatus
+
+	parserDash *dashboard
+	existing   *influxdb.Dashboard
+}
+
+func (d *stateDashboard) ID() influxdb.ID {
+	if IsExisting(d.stateStatus) && d.existing != nil {
+		return d.existing.ID
+	}
+	return d.id
+}
+
+func (d *stateDashboard) labels() []*label {
+	return d.parserDash.labels
+}
+
+func (d *stateDashboard) resourceType() influxdb.ResourceType {
+	return KindDashboard.ResourceType()
+}
+
+func (d *stateDashboard) stateIdentity() stateIdentity {
+	return stateIdentity{
+		id:           d.ID(),
+		name:         d.parserDash.Name(),
+		pkgName:      d.parserDash.PkgName(),
+		resourceType: d.resourceType(),
+		stateStatus:  d.stateStatus,
+	}
+}
+
+func (d *stateDashboard) diffDashboard() DiffDashboard {
+	diff := DiffDashboard{
+		DiffIdentifier: DiffIdentifier{
+			ID:          SafeID(d.ID()),
+			Remove:      IsRemoval(d.stateStatus),
+			StateStatus: d.stateStatus,
+			PkgName:     d.parserDash.PkgName(),
+		},
+		New: DiffDashboardValues{
+			Name:   d.parserDash.Name(),
+			Desc:   d.parserDash.Description,
+			Charts: make([]DiffChart, 0, len(d.parserDash.Charts)),
+		},
+	}
+
+	for _, c := range d.parserDash.Charts {
+		diff.New.Charts = append(diff.New.Charts, DiffChart{
+			Properties: c.properties(),
+			Height:     c.Height,
+			Width:      c.Width,
+		})
+	}
+
+	if d.existing == nil {
+		return diff
+	}
+
+	oldDiff := DiffDashboardValues{
+		Name:   d.existing.Name,
+		Desc:   d.existing.Description,
+		Charts: make([]DiffChart, 0, len(d.existing.Cells)),
+	}
+
+	for _, c := range d.existing.Cells {
+		var props influxdb.ViewProperties
+		if c.View != nil {
+			props = c.View.Properties
+		}
+
+		oldDiff.Charts = append(oldDiff.Charts, DiffChart{
+			Properties: props,
+			XPosition:  int(c.X),
+			YPosition:  int(c.Y),
+			Height:     int(c.H),
+			Width:      int(c.W),
+		})
+	}
+
+	diff.Old = &oldDiff
+
+	return diff
+}
+
+func (d *stateDashboard) summarize() SummaryDashboard {
+	sum := d.parserDash.summarize()
+	sum.ID = SafeID(d.ID())
+	sum.OrgID = SafeID(d.orgID)
+	return sum
+}
+
 type stateLabel struct {
 	id, orgID   influxdb.ID
 	stateStatus StateStatus
@@ -664,6 +883,132 @@ func (e *stateEndpoint) summarize() SummaryNotificationEndpoint {
 	return sum
 }

+type stateTask struct {
+	id, orgID   influxdb.ID
+	stateStatus StateStatus
+
+	parserTask *task
+	existing   *influxdb.Task
+}
+
+func (t *stateTask) ID() influxdb.ID {
+	if !IsNew(t.stateStatus) && t.existing != nil {
+		return t.existing.ID
+	}
+	return t.id
+}
+
+func (t *stateTask) diffTask() DiffTask {
+	diff := DiffTask{
+		DiffIdentifier: DiffIdentifier{
+			ID:      SafeID(t.ID()),
+			Remove:  IsRemoval(t.stateStatus),
+			PkgName: t.parserTask.PkgName(),
+		},
+		New: DiffTaskValues{
+			Name:        t.parserTask.Name(),
+			Cron:        t.parserTask.cron,
+			Description: t.parserTask.description,
+			Every:       durToStr(t.parserTask.every),
+			Offset:      durToStr(t.parserTask.offset),
+			Query:       t.parserTask.query,
+			Status:      t.parserTask.Status(),
+		},
+	}
+
+	if t.existing == nil {
+		return diff
+	}
+
+	diff.Old = &DiffTaskValues{
+		Name:        t.existing.Name,
+		Cron:        t.existing.Cron,
+		Description: t.existing.Description,
+		Every:       t.existing.Every,
+		Offset:      t.existing.Offset.String(),
+		Query:       t.existing.Flux,
+		Status:      influxdb.Status(t.existing.Status),
+	}

+	return diff
+}
+
+func (t *stateTask) labels() []*label {
+	return t.parserTask.labels
+}
+
+func (t *stateTask) resourceType() influxdb.ResourceType {
+	return influxdb.TasksResourceType
+}
+
+func (t *stateTask) stateIdentity() stateIdentity {
+	return stateIdentity{
+		id:           t.ID(),
+		name:         t.parserTask.Name(),
+		pkgName:      t.parserTask.PkgName(),
+		resourceType: t.resourceType(),
+		stateStatus:  t.stateStatus,
+	}
+}
+
+func (t *stateTask) summarize() SummaryTask {
+	sum := t.parserTask.summarize()
+	sum.ID = SafeID(t.id)
+	return sum
+}
+
+type stateTelegraf struct {
+	id, orgID   influxdb.ID
+	stateStatus StateStatus
+
+	parserTelegraf *telegraf
+	existing       *influxdb.TelegrafConfig
+}
+
+func (t *stateTelegraf) ID() influxdb.ID {
+	if !IsNew(t.stateStatus) && t.existing != nil {
+		return t.existing.ID
+	}
+	return t.id
+}
+
+func (t *stateTelegraf) diffTelegraf() DiffTelegraf {
+	return DiffTelegraf{
+		DiffIdentifier: DiffIdentifier{
+			ID:      SafeID(t.ID()),
+			Remove:  IsRemoval(t.stateStatus),
+			PkgName: t.parserTelegraf.PkgName(),
+		},
+		New: t.parserTelegraf.config,
+		Old: t.existing,
+	}
+}
+
+func (t *stateTelegraf) labels() []*label {
+	return t.parserTelegraf.labels
+}
+
+func (t *stateTelegraf) resourceType() influxdb.ResourceType {
+	return influxdb.TelegrafsResourceType
+}
+
+func (t *stateTelegraf) stateIdentity() stateIdentity {
+	return stateIdentity{
+		id:           t.ID(),
+		name:         t.parserTelegraf.Name(),
+		pkgName:      t.parserTelegraf.PkgName(),
+		resourceType: t.resourceType(),
+		stateStatus:  t.stateStatus,
+	}
+}
+
+func (t *stateTelegraf) summarize() SummaryTelegraf {
+	sum := t.parserTelegraf.summarize()
+	sum.TelegrafConfig.ID = t.id
+	sum.TelegrafConfig.OrgID = t.orgID
+	return sum
+}
+
 type stateVariable struct {
 	id, orgID   influxdb.ID
 	stateStatus StateStatus
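Taken together, the pkger changes in this PR move dashboards, tasks, and telegraf configs out of ad-hoc handling in Pkg and Service and into the stateCoordinator, where each resource is a parser object wrapped with a state status plus an optional existing platform object. A hedged, self-contained sketch of that wrapping pattern; the names and status string values below are illustrative, not pkger's real types:

package example

import "fmt"

// Status mirrors the role of pkger's StateStatus values.
type Status string

const (
	StatusNew    Status = "new"
	StatusExists Status = "exists"
	StatusRemove Status = "remove"
)

// parsed stands in for a parser-side object (dashboard, task, telegraf, ...).
type parsed struct{ name string }

// stateResource wraps a parsed object with the bookkeeping the coordinator needs,
// the same shape as stateDashboard, stateTask, and stateTelegraf above.
type stateResource struct {
	id       int
	status   Status
	parser   *parsed
	existing *struct{ id int } // platform object, if the resource already exists
}

// ID prefers the platform ID when the resource is not new, as the ID() methods above do.
func (r *stateResource) ID() int {
	if r.status != StatusNew && r.existing != nil {
		return r.existing.id
	}
	return r.id
}

func main() {
	// A coordinator-style map keyed by package name.
	state := map[string]*stateResource{
		"dash_1": {parser: &parsed{name: "dash_1"}, status: StatusNew},
	}
	state["dash_1"].id = 7
	fmt.Println(state["dash_1"].ID(), state["dash_1"].status) // 7 new
}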
@@ -829,12 +829,19 @@ func TestService(t *testing.T) {
 			sum, _, err := svc.Apply(context.TODO(), orgID, 0, pkg)
 			require.NoError(t, err)

-			require.Len(t, sum.Dashboards, 1)
+			require.Len(t, sum.Dashboards, 2)
 			dash1 := sum.Dashboards[0]
-			assert.Equal(t, SafeID(1), dash1.ID)
-			assert.Equal(t, SafeID(orgID), dash1.OrgID)
+			assert.NotZero(t, dash1.ID)
+			assert.NotZero(t, dash1.OrgID)
+			assert.Equal(t, "dash_1", dash1.PkgName)
 			assert.Equal(t, "display name", dash1.Name)
 			require.Len(t, dash1.Charts, 1)
+
+			dash2 := sum.Dashboards[1]
+			assert.NotZero(t, dash2.ID)
+			assert.Equal(t, "dash_2", dash2.PkgName)
+			assert.Equal(t, "dash_2", dash2.Name)
+			require.Empty(t, dash2.Charts)
 		})
 	})
@@ -855,8 +862,6 @@ func TestService(t *testing.T) {
 				return nil
 			}

-			pkg.mDashboards["dash_1_copy"] = pkg.mDashboards["dash_1"]
-
 			svc := newTestService(WithDashboardSVC(fakeDashSVC))

 			orgID := influxdb.ID(9000)
@@ -913,9 +918,6 @@ func TestService(t *testing.T) {
 				l.ID = influxdb.ID(fakeLabelSVC.CreateLabelCalls.Count() + 1)
 				return nil
 			}
-			fakeLabelSVC.DeleteLabelMappingFn = func(_ context.Context, m *influxdb.LabelMapping) error {
-				return nil
-			}
 			fakeLabelSVC.CreateLabelMappingFn = func(_ context.Context, mapping *influxdb.LabelMapping) error {
 				if mapping.ResourceID == 0 {
 					return errors.New("did not get a resource ID")
@@ -1074,19 +1076,22 @@ func TestService(t *testing.T) {
 		})

 		t.Run("maps dashboards with labels", func(t *testing.T) {
-			testLabelMappingFn(
-				t,
-				"testdata/dashboard_associates_label.yml",
-				1,
-				func() []ServiceSetterFn {
-					fakeDashSVC := mock.NewDashboardService()
-					fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error {
-						d.ID = influxdb.ID(rand.Int())
-						return nil
-					}
-					return []ServiceSetterFn{WithDashboardSVC(fakeDashSVC)}
-				},
-			)
+			opts := func() []ServiceSetterFn {
+				fakeDashSVC := mock.NewDashboardService()
+				fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error {
+					d.ID = influxdb.ID(rand.Int())
+					return nil
+				}
+				return []ServiceSetterFn{WithDashboardSVC(fakeDashSVC)}
+			}
+
+			t.Run("applies successfully", func(t *testing.T) {
+				testLabelMappingV2ApplyFn(t, "testdata/dashboard_associates_label.yml", 2, opts)
+			})
+
+			t.Run("deletes new label mappings on error", func(t *testing.T) {
+				testLabelMappingV2RollbackFn(t, "testdata/dashboard_associates_label.yml", 1, opts)
+			})
 		})

 		t.Run("maps notification endpoints with labels", func(t *testing.T) {
@@ -1141,48 +1146,54 @@ func TestService(t *testing.T) {
 		})

 		t.Run("maps tasks with labels", func(t *testing.T) {
-			testLabelMappingFn(
-				t,
-				"testdata/tasks.yml",
-				2,
-				func() []ServiceSetterFn {
-					fakeTaskSVC := mock.NewTaskService()
-					fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc influxdb.TaskCreate) (*influxdb.Task, error) {
-						reg := regexp.MustCompile(`name: "(.+)",`)
-						names := reg.FindStringSubmatch(tc.Flux)
-						if len(names) < 2 {
-							return nil, errors.New("bad flux query provided: " + tc.Flux)
-						}
-						return &influxdb.Task{
-							ID:             influxdb.ID(rand.Int()),
-							Type:           tc.Type,
-							OrganizationID: tc.OrganizationID,
-							OwnerID:        tc.OwnerID,
-							Name:           names[1],
-							Description:    tc.Description,
-							Status:         tc.Status,
-							Flux:           tc.Flux,
-						}, nil
-					}
-					return []ServiceSetterFn{WithTaskSVC(fakeTaskSVC)}
-				},
-			)
+			opts := func() []ServiceSetterFn {
+				fakeTaskSVC := mock.NewTaskService()
+				fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc influxdb.TaskCreate) (*influxdb.Task, error) {
+					reg := regexp.MustCompile(`name: "(.+)",`)
+					names := reg.FindStringSubmatch(tc.Flux)
+					if len(names) < 2 {
+						return nil, errors.New("bad flux query provided: " + tc.Flux)
+					}
+					return &influxdb.Task{
+						ID:             influxdb.ID(rand.Int()),
+						Type:           tc.Type,
+						OrganizationID: tc.OrganizationID,
+						OwnerID:        tc.OwnerID,
+						Name:           names[1],
+						Description:    tc.Description,
+						Status:         tc.Status,
+						Flux:           tc.Flux,
+					}, nil
+				}
+				return []ServiceSetterFn{WithTaskSVC(fakeTaskSVC)}
+			}
+
+			t.Run("applies successfully", func(t *testing.T) {
+				testLabelMappingV2ApplyFn(t, "testdata/tasks.yml", 2, opts)
+			})
+
+			t.Run("deletes new label mappings on error", func(t *testing.T) {
+				testLabelMappingV2RollbackFn(t, "testdata/tasks.yml", 1, opts)
+			})
 		})

 		t.Run("maps telegrafs with labels", func(t *testing.T) {
-			testLabelMappingFn(
-				t,
-				"testdata/telegraf.yml",
-				1,
-				func() []ServiceSetterFn {
-					fakeTeleSVC := mock.NewTelegrafConfigStore()
-					fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, cfg *influxdb.TelegrafConfig, _ influxdb.ID) error {
-						cfg.ID = influxdb.ID(rand.Int())
-						return nil
-					}
-					return []ServiceSetterFn{WithTelegrafSVC(fakeTeleSVC)}
-				},
-			)
+			opts := func() []ServiceSetterFn {
+				fakeTeleSVC := mock.NewTelegrafConfigStore()
+				fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, cfg *influxdb.TelegrafConfig, _ influxdb.ID) error {
+					cfg.ID = influxdb.ID(rand.Int())
+					return nil
+				}
+				return []ServiceSetterFn{WithTelegrafSVC(fakeTeleSVC)}
+			}
+
+			t.Run("applies successfully", func(t *testing.T) {
+				testLabelMappingV2ApplyFn(t, "testdata/telegraf.yml", 2, opts)
+			})
+
+			t.Run("deletes new label mappings on error", func(t *testing.T) {
+				testLabelMappingV2RollbackFn(t, "testdata/telegraf.yml", 1, opts)
+			})
 		})

 		t.Run("maps variables with labels", func(t *testing.T) {
@@ -1391,11 +1402,15 @@ func TestService(t *testing.T) {
 				require.NoError(t, err)

 				require.Len(t, sum.Tasks, 2)
-				for i, actual := range sum.Tasks {
-					assert.NotZero(t, actual.ID)
-					assert.Equal(t, "task_"+strconv.Itoa(i), actual.Name)
-					assert.Equal(t, "desc_"+strconv.Itoa(i), actual.Description)
-				}
+				assert.NotZero(t, sum.Tasks[0].ID)
+				assert.Equal(t, "task_1", sum.Tasks[0].PkgName)
+				assert.Equal(t, "task_1", sum.Tasks[0].Name)
+				assert.Equal(t, "desc_1", sum.Tasks[0].Description)
+
+				assert.NotZero(t, sum.Tasks[1].ID)
+				assert.Equal(t, "task_UUID", sum.Tasks[1].PkgName)
+				assert.Equal(t, "task_0", sum.Tasks[1].Name)
+				assert.Equal(t, "desc_0", sum.Tasks[1].Description)
 			})
 		})
@@ -1449,6 +1464,7 @@ func TestService(t *testing.T) {
 			testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, pkg *Pkg) {
 				fakeTeleSVC := mock.NewTelegrafConfigStore()
 				fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, tc *influxdb.TelegrafConfig, userID influxdb.ID) error {
+					t.Log("called")
 					if fakeTeleSVC.CreateTelegrafConfigCalls.Count() == 1 {
 						return errors.New("limit hit")
 					}
@@ -1462,7 +1478,12 @@ func TestService(t *testing.T) {
 					return nil
 				}

-				pkg.mTelegrafs["first_tele_config_copy"] = pkg.mTelegrafs["first_tele_config"]
+				stubTele := &telegraf{
+					identity: identity{
+						name: &references{val: "stub"},
+					},
+				}
+				pkg.mTelegrafs[stubTele.PkgName()] = stubTele

 				svc := newTestService(WithTelegrafSVC(fakeTeleSVC))
@@ -42,5 +42,15 @@
 				}
 			]
 		}
+	},
+	{
+		"apiVersion": "influxdata.com/v2alpha1",
+		"kind": "Dashboard",
+		"metadata": {
+			"name": "dash_2"
+		},
+		"spec": {
+			"description": "desc"
+		}
 	}
 ]
@@ -27,3 +27,10 @@ spec:
         type: text
         hex: "#8F8AF4"
         value: 3
+---
+apiVersion: influxdata.com/v2alpha1
+kind: Dashboard
+metadata:
+  name: dash_2
+spec:
+  description: desc
@@ -6,6 +6,13 @@
 			"name": "label_1"
 		}
 	},
+	{
+		"apiVersion": "influxdata.com/v2alpha1",
+		"kind": "Label",
+		"metadata": {
+			"name": "label_2"
+		}
+	},
 	{
 		"apiVersion": "influxdata.com/v2alpha1",
 		"kind": "Dashboard",
@@ -17,6 +24,10 @@
 				{
 					"kind": "Label",
 					"name": "label_1"
+				},
+				{
+					"kind": "Label",
+					"name": "label_2"
 				}
 			]
 		}
@@ -1,10 +1,14 @@
----
 apiVersion: influxdata.com/v2alpha1
 kind: Label
 metadata:
   name: label_1
 ---
 apiVersion: influxdata.com/v2alpha1
+kind: Label
+metadata:
+  name: label_2
+---
+apiVersion: influxdata.com/v2alpha1
 kind: Dashboard
 metadata:
   name: dash_1
@@ -12,3 +16,5 @@ spec:
   associations:
     - kind: Label
       name: label_1
+    - kind: Label
+      name: label_2
@@ -6,6 +6,13 @@
 			"name": "label_1"
 		}
 	},
+	{
+		"apiVersion": "influxdata.com/v2alpha1",
+		"kind": "Label",
+		"metadata": {
+			"name": "label_2"
+		}
+	},
 	{
 		"apiVersion": "influxdata.com/v2alpha1",
 		"kind": "Telegraf",
@@ -19,6 +26,10 @@
 				{
 					"kind": "Label",
 					"name": "label_1"
+				},
+				{
+					"kind": "Label",
+					"name": "label_2"
 				}
 			],
 			"config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:9999\n urls = [\"http://localhost:9999\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]"
@@ -4,6 +4,11 @@ metadata:
   name: label_1
 ---
 apiVersion: influxdata.com/v2alpha1
+kind: Label
+metadata:
+  name: label_2
+---
+apiVersion: influxdata.com/v2alpha1
 kind: Telegraf
 metadata:
   name: first_tele_config
@@ -13,6 +18,8 @@ spec:
   associations:
     - kind: Label
       name: label_1
+    - kind: Label
+      name: label_2
   config: |
     # Configuration for telegraf agent
     [agent]
@@ -142,6 +142,13 @@ func (h *OrgHandler) handleGetOrgs(w http.ResponseWriter, r *http.Request) {
 		}
 	}

+	if id := qp.Get("userID"); id != "" {
+		i, err := influxdb.IDFromString(id)
+		if err == nil {
+			filter.UserID = i
+		}
+	}
+
 	orgs, _, err := h.orgSvc.FindOrganizations(r.Context(), filter)
 	if err != nil {
 		h.api.Err(w, err)
@@ -5,6 +5,7 @@ import (

 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/authorizer"
+	icontext "github.com/influxdata/influxdb/v2/context"
 )

 var _ influxdb.OrganizationService = (*AuthedOrgService)(nil)
@@ -46,8 +47,18 @@ func (s *AuthedOrgService) FindOrganization(ctx context.Context, filter influxdb

 // FindOrganizations retrieves all organizations that match the provided filter and then filters the list down to only the resources that are authorized.
 func (s *AuthedOrgService) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {
-	// TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data
-	// will likely be expensive.
+	if filter.Name == nil && filter.ID == nil && filter.UserID == nil {
+		// if the user doesnt have permission to look up all orgs we need to add this users id to the filter to save lookup time
+		auth, err := icontext.GetAuthorizer(ctx)
+		if err != nil {
+			return nil, 0, err
+		}
+		if _, _, err := authorizer.AuthorizeReadGlobal(ctx, influxdb.OrgsResourceType); err != nil {
+			userid := auth.GetUserID()
+			filter.UserID = &userid
+		}
+	}
+
 	os, _, err := s.s.FindOrganizations(ctx, filter, opt...)
 	if err != nil {
 		return nil, 0, err
@@ -67,6 +67,28 @@ func (s *Service) FindOrganizations(ctx context.Context, filter influxdb.Organiz
 	}

+	var orgs []*influxdb.Organization
+
+	if filter.UserID != nil {
+		// find urms for orgs with this user
+		urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{
+			UserID:       *filter.UserID,
+			ResourceType: influxdb.OrgsResourceType,
+		}, opt...)
+		if err != nil {
+			return nil, 0, err
+		}
+		// find orgs by the urm's resource ids.
+		for _, urm := range urms {
+			o, err := s.FindOrganizationByID(ctx, urm.ResourceID)
+			if err == nil {
+				// if there is an error then this is a crufty urm and we should just move on
+				orgs = append(orgs, o)
+			}
+		}
+
+		return orgs, len(orgs), nil
+	}

 	err := s.store.View(ctx, func(tx kv.Tx) error {
 		os, err := s.store.ListOrgs(ctx, tx, opt...)
 		if err != nil {
@@ -261,35 +261,45 @@ describe('Buckets', () => {

  describe('add data', () => {
    it('writing data to buckets', () => {
-      // writing a well-formed line is accepted
-      cy.getByTestID('add-data--button').click()
-      cy.getByTestID('bucket-add-line-protocol').click()
-      cy.getByTestID('Enter Manually').click()
-      cy.getByTestID('line-protocol--text-area').type('m1,t1=v1 v=1.0')
-      cy.getByTestID('next').click()
-      cy.getByTestID('line-protocol--status').should('have.class', 'success')
-      cy.getByTestID('next').click()
+      cy.get('@org').then(({id: orgID}: Organization) => {
+        // writing a well-formed line is accepted
+        cy.getByTestID('add-data--button').click()
+        cy.getByTestID('bucket-add-client-library').click()
+        cy.location('pathname').should(
+          'be',
+          `/orgs/${orgID}/load-data/client-libraries`
+        )
+        cy.go('back')
+        cy.getByTestID('add-data--button').click()
+
+        cy.getByTestID('bucket-add-line-protocol').click()
+        cy.getByTestID('Enter Manually').click()
+        cy.getByTestID('line-protocol--text-area').type('m1,t1=v1 v=1.0')
+        cy.getByTestID('next').click()
+        cy.getByTestID('line-protocol--status').should('have.class', 'success')
+        cy.getByTestID('next').click()

-      // writing a poorly-formed line errors
-      cy.getByTestID('add-data--button').click()
-      cy.getByTestID('bucket-add-line-protocol').click()
-      cy.getByTestID('Enter Manually').click()
-      cy.getByTestID('line-protocol--text-area').type('invalid invalid')
-      cy.getByTestID('next').click()
-      cy.getByTestID('line-protocol--status').should('have.class', 'error')
-      cy.getByTestID('next').click()
+        // writing a poorly-formed line errors
+        cy.getByTestID('add-data--button').click()
+        cy.getByTestID('bucket-add-line-protocol').click()
+        cy.getByTestID('Enter Manually').click()
+        cy.getByTestID('line-protocol--text-area').type('invalid invalid')
+        cy.getByTestID('next').click()
+        cy.getByTestID('line-protocol--status').should('have.class', 'error')
+        cy.getByTestID('next').click()

-      // writing a well-formed line with millisecond precision is accepted
-      cy.getByTestID('add-data--button').click()
-      cy.getByTestID('bucket-add-line-protocol').click()
-      cy.getByTestID('Enter Manually').click()
-      cy.getByTestID('wizard-step--lp-precision--dropdown').click()
-      cy.getByTestID('wizard-step--lp-precision-ms').click()
-      const now = Date.now()
-      cy.getByTestID('line-protocol--text-area').type(`m2,t2=v2 v=2.0 ${now}`)
-      cy.getByTestID('next').click()
-      cy.getByTestID('line-protocol--status').should('have.class', 'success')
-      cy.getByTestID('next').click()
+        // writing a well-formed line with millisecond precision is accepted
+        cy.getByTestID('add-data--button').click()
+        cy.getByTestID('bucket-add-line-protocol').click()
+        cy.getByTestID('Enter Manually').click()
+        cy.getByTestID('wizard-step--lp-precision--dropdown').click()
+        cy.getByTestID('wizard-step--lp-precision-ms').click()
+        const now = Date.now()
+        cy.getByTestID('line-protocol--text-area').type(`m2,t2=v2 v=2.0 ${now}`)
+        cy.getByTestID('next').click()
+        cy.getByTestID('line-protocol--status').should('have.class', 'success')
+        cy.getByTestID('next').click()
+      })
    })
  })
})
@@ -283,7 +283,7 @@ describe('Collectors', () => {
      cy.contains('Done')
        .click()
        .then(() => {
          cy.get('.icon.checkmark').should('exist')
          cy.get('.cf-icon.checkmark').should('exist')
        })
    })
  })

@@ -292,21 +292,14 @@ describe('Collectors', () => {
    it('handles busted input', () => {
      // do nothing when clicking done with no urls
      cy.contains('Done').click()
      cy.contains('Done').should('exist')
      cy.contains('Nginx').should('exist')
      cy.contains('nginx').should('exist')
      cy.get('.cf-icon.circle-thick').should('exist')

      cy.contains('nginx').click()
      cy.getByTestID('input-field').type('youre mom')
      cy.contains('Add').click()

      cy.contains('youre mom')
        .should('exist')
        .then(() => {
          cy.contains('Done')
            .click()
            .then(() => {
              cy.get('.icon.remove').should('exist')
            })
        })
      cy.contains('Done').click()
      cy.get('.cf-icon.remove').should('exist')
    })
  })

@@ -345,7 +338,7 @@ describe('Collectors', () => {
      cy.contains('alan bean').should('not.exist')

      cy.contains('Done').click()
      cy.get('.icon.checkmark').should('exist')
      cy.get('.cf-icon.checkmark').should('exist')
    })
  })
})

@@ -354,8 +347,7 @@ describe('Collectors', () => {
      cy.contains('Done')
        .click()
        .then(() => {
          cy.contains('Done').should('exist')
          cy.contains('Redis').should('exist')
          cy.contains('redis').should('exist')
        })
    })
  })

@@ -17,6 +17,7 @@ import {
interface Props {
  onAddCollector: () => void
  onAddLineProtocol: () => void
  onAddClientLibrary: () => void
  onAddScraper: () => void
}

@@ -24,7 +25,12 @@ export default class BucketAddDataButton extends PureComponent<Props> {
  private triggerRef: RefObject<ButtonRef> = createRef()

  public render() {
    const {onAddCollector, onAddLineProtocol, onAddScraper} = this.props
    const {
      onAddCollector,
      onAddLineProtocol,
      onAddClientLibrary,
      onAddScraper,
    } = this.props

    return (
      <>

@@ -58,6 +64,20 @@ export default class BucketAddDataButton extends PureComponent<Props> {
            Quickly load an existing line protocol file.
          </div>
        </div>
        <div
          className="bucket-add-data--option"
          onClick={onAddClientLibrary}
        >
          <div
            className="bucket-add-data--option-header"
            data-testid="bucket-add-client-library"
          >
            Client Library
          </div>
          <div className="bucket-add-data--option-desc">
            Write data easily from your own application.
          </div>
        </div>
        <CloudExclude>
          <div className="bucket-add-data--option" onClick={onAddScraper}>
            <div className="bucket-add-data--option-header">

@@ -88,6 +88,13 @@ const BucketCard: FC<Props & WithRouterProps & DispatchProps> = ({
    )
  }

  const handleAddClientLibrary = (): void => {
    onSetDataLoadersBucket(orgID, bucket.name, bucket.id)
    onSetDataLoadersType(DataLoaderType.ClientLibrary)

    router.push(`/orgs/${orgID}/load-data/client-libraries`)
  }

  const handleAddScraper = () => {
    onSetDataLoadersBucket(orgID, bucket.name, bucket.id)

@@ -110,6 +117,7 @@ const BucketCard: FC<Props & WithRouterProps & DispatchProps> = ({
        <BucketAddDataButton
          onAddCollector={handleAddCollector}
          onAddLineProtocol={handleAddLineProtocol}
          onAddClientLibrary={handleAddClientLibrary}
          onAddScraper={handleAddScraper}
        />
        <Button

@@ -89,6 +89,7 @@ export class PluginConfigForm extends PureComponent<Props> {
    const activeTelegrafPlugin = telegrafPlugins.find(tp => tp.active)
    if (!!activeTelegrafPlugin) {
      if (!activeTelegrafPlugin.hasOwnProperty('plugin')) {
        onSetActiveTelegrafPlugin('')
        return
      }
      onSetPluginConfiguration(activeTelegrafPlugin.name)

@@ -1,45 +0,0 @@
// Libraries
import React from 'react'
import {shallow} from 'enzyme'

// Components
import SideBarTab from 'src/dataLoaders/components/side_bar/SideBarTab'

// Types
import {SideBarTabStatus as TabStatus} from 'src/dataLoaders/components/side_bar/SideBar'

// Constants
import {IconFont} from 'src/clockface'

const onClick = jest.fn(() => {})

const setup = (override?) => {
  const props = {
    label: 'label',
    key: 'key',
    id: 'id',
    active: true,
    status: TabStatus.Default,
    onClick,
    ...override,
  }

  const wrapper = shallow(<SideBarTab {...props} />)

  return {wrapper}
}

describe('SideBarTab', () => {
  describe('rendering', () => {
    it('renders! wee!', () => {
      const {wrapper} = setup()
      expect(wrapper.exists()).toBe(true)
    })

    it('renders a checkmark if status success', () => {
      const {wrapper} = setup({status: TabStatus.Success})
      expect(wrapper.exists()).toBe(true)
      expect(wrapper.find(`.${IconFont.Checkmark}`)).toHaveLength(1)
    })
  })
})

@@ -3,7 +3,7 @@ import React, {Component} from 'react'
import classnames from 'classnames'

// Types
import {IconFont} from 'src/clockface'
import {Icon, IconFont} from '@influxdata/clockface'
import {SideBarTabStatus as TabStatus} from 'src/dataLoaders/components/side_bar/SideBar'

interface Props {

@@ -20,8 +20,9 @@ class SideBarTab extends Component<Props> {

    return (
      <div className={this.className} onClick={this.handleClick}>
        {this.icon}
        {label}
        <pre>
          {this.icon} {label}
        </pre>
      </div>
    )
  }

@@ -45,24 +46,16 @@ class SideBarTab extends Component<Props> {

  private get icon(): JSX.Element {
    const {status} = this.props
    let icon

    switch (status) {
      case TabStatus.Pending:
      case TabStatus.Success:
        icon = `side-bar--icon icon ${IconFont.Checkmark}`
        break
        return <Icon glyph={IconFont.Checkmark} />
      case TabStatus.Error:
        icon = `side-bar--icon icon ${IconFont.Remove}`
        break
      case TabStatus.Default:
        icon = `side-bar--icon icon ${IconFont.CircleThick}`
        break
        return <Icon glyph={IconFont.Remove} />
      default:
        icon = `side-bar--icon`
        return <Icon glyph={IconFont.CircleThick} />
    }

    return <span className={icon} />
  }
}

@@ -16,7 +16,6 @@ $flux-editor--right-panel: 330px;
  flex: 1 0 0;
  position: relative;
  border-radius: $cf-radius;
  overflow: hidden;
}

// Variables / Functions List

@@ -84,6 +84,7 @@ export enum DataLoaderType {
  CSV = 'CSV',
  Streaming = 'Streaming',
  LineProtocol = 'Line Protocol',
  ClientLibrary = 'Client Library',
  Scraping = 'Scraping',
  Empty = '',
}