fix(telegraf): support pagination parameters when listing
parent 42445a71ff
commit b274e15eac
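The diff below threads `influxdb.FindOptions` (limit and offset) from the CLI and HTTP layers down into the kv-backed telegraf service, and reworks the kv index `VisitFunc` so a walk can stop early once a page is full. As an illustration only, not part of this commit, a caller holding any implementation of `influxdb.TelegrafConfigStore` could page through an organization's configurations roughly like this (the helper name and page size are invented):

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2"
)

// listAllTelegrafs pages through an organization's telegraf configs using the
// FindOptions now honoured by FindTelegrafConfigs. Illustrative sketch: svc is
// assumed to satisfy influxdb.TelegrafConfigStore.
func listAllTelegrafs(ctx context.Context, svc influxdb.TelegrafConfigStore, orgID influxdb.ID) ([]*influxdb.TelegrafConfig, error) {
	const pageSize = 20
	var all []*influxdb.TelegrafConfig
	for offset := 0; ; offset += pageSize {
		page, _, err := svc.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{OrgID: &orgID},
			influxdb.FindOptions{Limit: pageSize, Offset: offset})
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if len(page) < pageSize { // short page: nothing left to fetch
			return all, nil
		}
	}
}
```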
@@ -96,10 +96,10 @@ func (b *cmdTelegrafBuilder) listRunE(cmd *cobra.Command, args []string) error {
 		return b.writeTelegrafConfig(cfg)
 	}
 
-	cfgs, _, err := svc.FindTelegrafConfigs(context.Background(), influxdb.TelegrafConfigFilter{
-		OrgID:                     &orgID,
-		UserResourceMappingFilter: influxdb.UserResourceMappingFilter{ResourceType: influxdb.TelegrafsResourceType},
-	})
+	cfgs, _, err := svc.FindTelegrafConfigs(context.Background(),
+		influxdb.TelegrafConfigFilter{
+			OrgID: &orgID,
+		})
 	if err != nil {
 		return err
 	}

@@ -28,7 +28,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
-	"fmt"
 
 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/kv"

@@ -160,38 +159,39 @@ func (s *Service) unsetDefault(tx kv.Tx, compKey []byte) error {
 // getFirstBut returns the first element in the db/rp index (not accounting for the `skipID`).
 // If the length of the returned ID is 0, it means no element was found.
 // The skip value is useful, for instance, if one wants to delete an element based on the result of this operation.
-func (s *Service) getFirstBut(tx kv.Tx, compKey []byte, skipID []byte) ([]byte, error) {
-	stop := fmt.Errorf("stop")
-	var next []byte
-	if err := s.byOrgAndDatabase.Walk(context.Background(), tx, compKey, func(k, v []byte) error {
+func (s *Service) getFirstBut(tx kv.Tx, compKey []byte, skipID []byte) (next []byte, err error) {
+	err = s.byOrgAndDatabase.Walk(context.Background(), tx, compKey, func(k, v []byte) (bool, error) {
 		if bytes.Equal(skipID, k) {
-			return nil
+			return true, nil
 		}
 
 		next = k
-		return stop
-	}); err != nil && err != stop {
-		return nil, ErrInternalService(err)
-	}
-	return next, nil
+		return false, nil
+	})
+	return
 }
 
 // isDBRPUnique verifies if the triple orgID-database-retention-policy is unique.
 func (s *Service) isDBRPUnique(ctx context.Context, m influxdb.DBRPMappingV2) error {
 	return s.store.View(ctx, func(tx kv.Tx) error {
-		return s.byOrgAndDatabase.Walk(ctx, tx, composeForeignKey(m.OrganizationID, m.Database), func(k, v []byte) error {
+		return s.byOrgAndDatabase.Walk(ctx, tx, composeForeignKey(m.OrganizationID, m.Database), func(k, v []byte) (bool, error) {
 			dbrp := &influxdb.DBRPMappingV2{}
 			if err := json.Unmarshal(v, dbrp); err != nil {
-				return ErrInternalService(err)
+				return false, ErrInternalService(err)
 			}
 
 			if dbrp.ID == m.ID {
 				// Corner case.
 				// This is the very same DBRP, just skip it!
-				return nil
+				return true, nil
 			}
 
 			if dbrp.RetentionPolicy == m.RetentionPolicy {
-				return ErrDBRPAlreadyExists("another DBRP mapping with same orgID, db, and rp exists")
+				return false, ErrDBRPAlreadyExists("another DBRP mapping with same orgID, db, and rp exists")
 			}
-			return nil
+
+			return true, nil
 		})
 	})
 }

@@ -254,22 +254,23 @@ func (s *Service) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilte
 	}
 
 	ms := []*influxdb.DBRPMappingV2{}
-	add := func(tx kv.Tx) func(k, v []byte) error {
-		return func(k, v []byte) error {
+	add := func(tx kv.Tx) func(k, v []byte) (bool, error) {
+		return func(k, v []byte) (bool, error) {
 			m := influxdb.DBRPMappingV2{}
 			if err := json.Unmarshal(v, &m); err != nil {
-				return ErrInternalService(err)
+				return false, ErrInternalService(err)
 			}
 			// Updating the Default field must be done before filtering.
 			defID, err := get(tx, m.OrganizationID, m.Database)
 			if err != nil {
-				return ErrInternalService(err)
+				return false, ErrInternalService(err)
 			}
 
 			m.Default = m.ID == *defID
 			if filterFunc(&m, filter) {
 				ms = append(ms, &m)
 			}
-			return nil
+
+			return true, nil
 		}
 	}

@@ -303,7 +304,8 @@ func (s *Service) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilte
 			if err != nil {
 				return ErrInternalService(err)
 			}
-			return add(tx)(defID, v)
+			_, err = add(tx)(defID, v)
+			return err
 		}
 	}
 	return s.byOrgAndDatabase.Walk(ctx, tx, compKey, add(tx))

@@ -318,7 +320,7 @@ func (s *Service) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilte
 	}
 
 	for k, v := cur.First(); k != nil; k, v = cur.Next() {
-		if err := add(tx)(k, v); err != nil {
+		if _, err := add(tx)(k, v); err != nil {
 			return err
 		}
 	}

@@ -278,12 +278,8 @@ func (h *TelegrafHandler) handleGetTelegraf(w http.ResponseWriter, r *http.Reque
 
 func decodeTelegrafConfigFilter(ctx context.Context, r *http.Request) (*influxdb.TelegrafConfigFilter, error) {
 	f := &influxdb.TelegrafConfigFilter{}
-	urm, err := decodeUserResourceMappingFilter(ctx, r, influxdb.TelegrafsResourceType)
-	if err == nil {
-		f.UserResourceMappingFilter = *urm
-	}
 
 	q := r.URL.Query()
 
 	if orgIDStr := q.Get("orgID"); orgIDStr != "" {
 		orgID, err := influxdb.IDFromString(orgIDStr)
 		if err != nil {

@@ -297,7 +293,8 @@ func decodeTelegrafConfigFilter(ctx context.Context, r *http.Request) (*influxdb
 	} else if orgNameStr := q.Get("org"); orgNameStr != "" {
 		f.Organization = &orgNameStr
 	}
-	return f, err
+
+	return f, nil
 }
 
 // handlePostTelegraf is the HTTP handler for the POST /api/v2/telegrafs route.

|
@ -445,12 +442,6 @@ func (s *TelegrafService) FindTelegrafConfigs(ctx context.Context, f influxdb.Te
|
||||||
if f.Organization != nil {
|
if f.Organization != nil {
|
||||||
params = append(params, [2]string{"organization", *f.Organization})
|
params = append(params, [2]string{"organization", *f.Organization})
|
||||||
}
|
}
|
||||||
if f.ResourceID != 0 {
|
|
||||||
params = append(params, [2]string{"resourceID", f.ResourceID.String()})
|
|
||||||
}
|
|
||||||
if f.UserID != 0 {
|
|
||||||
params = append(params, [2]string{"userID", f.UserID.String()})
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp struct {
|
var resp struct {
|
||||||
Configs []*influxdb.TelegrafConfig `json:"configurations"`
|
Configs []*influxdb.TelegrafConfig `json:"configurations"`
|
||||||
|
|
|
@@ -222,7 +222,7 @@ func indexWalk(ctx context.Context, indexCursor ForwardCursor, sourceBucket Buck
 
 	for i, value := range values {
 		if value != nil {
-			if err := visit(keys[i], value); err != nil {
+			if cont, err := visit(keys[i], value); !cont || err != nil {
 				return err
 			}
 		}

@@ -390,9 +390,9 @@ func consumeBucket(ctx context.Context, store Store, fn func(tx Tx) (Bucket, err
 			return err
 		}
 
-		return WalkCursor(ctx, cursor, func(k, v []byte) error {
+		return WalkCursor(ctx, cursor, func(k, v []byte) (bool, error) {
 			kvs = append(kvs, [2][]byte{k, v})
-			return nil
+			return true, nil
 		})
 	})
 }

@@ -253,24 +253,24 @@ func (m *Migrator) walk(ctx context.Context, store kv.Store, fn func(id influxdb
 			return err
 		}
 
-		return kv.WalkCursor(ctx, cursor, func(k, v []byte) error {
+		return kv.WalkCursor(ctx, cursor, func(k, v []byte) (bool, error) {
 			var id influxdb.ID
 			if err := id.Decode(k); err != nil {
-				return fmt.Errorf("decoding migration id: %w", err)
+				return false, fmt.Errorf("decoding migration id: %w", err)
 			}
 
 			var migration Migration
 			if err := json.Unmarshal(v, &migration); err != nil {
-				return err
+				return false, err
 			}
 
 			idx := int(id) - 1
 			if idx >= len(m.Specs) {
-				return fmt.Errorf("migration %q: %w", migration.Name, ErrMigrationSpecNotFound)
+				return false, fmt.Errorf("migration %q: %w", migration.Name, ErrMigrationSpecNotFound)
 			}
 
 			if spec := m.Specs[idx]; spec.MigrationName() != migration.Name {
-				return fmt.Errorf("expected migration %q, found %q", spec.MigrationName(), migration.Name)
+				return false, fmt.Errorf("expected migration %q, found %q", spec.MigrationName(), migration.Name)
 			}
 
 			if migration.FinishedAt != nil {

@@ -279,7 +279,7 @@ func (m *Migrator) walk(ctx context.Context, store kv.Store, fn func(id influxdb
 
 			fn(id, migration)
 
-			return nil
+			return true, nil
 		})
 	}); err != nil {
 		return fmt.Errorf("reading migrations: %w", err)

@@ -234,7 +234,7 @@ func WithCursorLimit(limit int) CursorOption {
 
 // VisitFunc is called for each k, v byte slice pair from the underlying source bucket
 // which are found in the index bucket for a provided foreign key.
-type VisitFunc func(k, v []byte) error
+type VisitFunc func(k, v []byte) (bool, error)
 
 // WalkCursor consumers the forward cursor call visit for each k/v pair found
 func WalkCursor(ctx context.Context, cursor ForwardCursor, visit VisitFunc) (err error) {

@@ -245,7 +245,7 @@ func WalkCursor(ctx context.Context, cursor ForwardCursor, visit VisitFunc) (err
 	}()
 
 	for k, v := cursor.Next(); k != nil; k, v = cursor.Next() {
-		if err := visit(k, v); err != nil {
+		if cont, err := visit(k, v); !cont || err != nil {
 			return err
 		}
 	}

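The kv hunks above are the core of the change: `VisitFunc` now returns `(bool, error)`, and `WalkCursor`/`indexWalk` stop as soon as a callback returns `false`, so callers no longer need a sentinel error to cut a walk short. A minimal sketch of the new contract, with hypothetical helper and index names (idx stands in for one of the `*kv.Index` values walked elsewhere in this diff):

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/kv"
)

// collectAtMost is a hypothetical helper showing the updated kv.VisitFunc
// contract: the callback returns (bool, error); returning false stops the
// walk early, returning an error aborts it.
func collectAtMost(ctx context.Context, tx kv.Tx, idx *kv.Index, foreignKey []byte, max int) ([][]byte, error) {
	var values [][]byte
	err := idx.Walk(ctx, tx, foreignKey, func(k, v []byte) (bool, error) {
		values = append(values, append([]byte(nil), v...)) // copy the value out of the transaction
		return len(values) < max, nil                      // false once the page is full
	})
	return values, err
}
```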
@@ -136,17 +136,17 @@ func (s *Service) findUserResourceMappings(ctx context.Context, tx Tx, filter in
 	if filter.UserID.Valid() {
 		// urm by user index lookup
 		userID, _ := filter.UserID.Encode()
-		if err := s.urmByUserIndex.Walk(ctx, tx, userID, func(k, v []byte) error {
+		if err := s.urmByUserIndex.Walk(ctx, tx, userID, func(k, v []byte) (bool, error) {
 			m := &influxdb.UserResourceMapping{}
 			if err := json.Unmarshal(v, m); err != nil {
-				return CorruptURMError(err)
+				return false, CorruptURMError(err)
 			}
 
 			if filterFn(m) {
 				ms = append(ms, m)
 			}
 
-			return nil
+			return true, nil
 		}); err != nil {
 			return nil, err
 		}

paging.go (14 changes)
@@ -34,6 +34,20 @@ type FindOptions struct {
 	Descending bool
 }
 
+// GetLimit returns the resolved limit between then limit boundaries.
+// Given a limit <= 0 it returns the default limit.
+func (f *FindOptions) GetLimit() int {
+	if f == nil || f.Limit <= 0 {
+		return DefaultPageSize
+	}
+
+	if f.Limit > MaxPageSize {
+		return MaxPageSize
+	}
+
+	return f.Limit
+}
+
 // DecodeFindOptions returns a FindOptions decoded from http request.
 func DecodeFindOptions(r *http.Request) (*FindOptions, error) {
 	opts := &FindOptions{}

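`GetLimit` is the new exported helper on `FindOptions`: it resolves the requested page size against the package's existing `DefaultPageSize` and `MaxPageSize` bounds and tolerates a nil receiver. A small sketch of the resulting values (illustrative, not from the commit):

```go
package example

import "github.com/influxdata/influxdb/v2"

// resolvedLimits illustrates GetLimit's clamping: non-positive limits fall
// back to DefaultPageSize, oversized limits are capped at MaxPageSize, and a
// nil *FindOptions behaves like "no options supplied".
func resolvedLimits() []int {
	var none *influxdb.FindOptions
	return []int{
		none.GetLimit(),                               // DefaultPageSize
		(&influxdb.FindOptions{Limit: -1}).GetLimit(), // DefaultPageSize
		(&influxdb.FindOptions{Limit: 5}).GetLimit(),  // 5, within bounds
		(&influxdb.FindOptions{Limit: 1 << 20}).GetLimit(), // MaxPageSize, assuming it sits below 1<<20
	}
}
```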
@@ -81,19 +81,20 @@ func (s *Storage) ListSecret(ctx context.Context, tx kv.Tx, orgID influxdb.ID) (
 
 	keys := []string{}
 
-	err = kv.WalkCursor(ctx, cur, func(k, v []byte) error {
+	err = kv.WalkCursor(ctx, cur, func(k, v []byte) (bool, error) {
 		id, key, err := decodeSecretKey(k)
 		if err != nil {
-			return err
+			return false, err
 		}
 
 		if id != orgID {
 			// We've reached the end of the keyspace for the provided orgID
-			return nil
+			return false, nil
 		}
 
 		keys = append(keys, key)
-		return nil
+
+		return true, nil
 	})
 	if err != nil {
 		return nil, err

@@ -54,7 +54,6 @@ type TelegrafConfigStore interface {
 type TelegrafConfigFilter struct {
 	OrgID        *ID
 	Organization *string
-	UserResourceMappingFilter
 }
 
 // TelegrafConfig stores telegraf config for one telegraf instance.

@@ -155,25 +155,40 @@ func (s *Service) findTelegrafConfigByID(ctx context.Context, tx kv.Tx, id influ
 // Additional options provide pagination & sorting.
 func (s *Service) FindTelegrafConfigs(ctx context.Context, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) (tcs []*influxdb.TelegrafConfig, n int, err error) {
 	err = s.kv.View(ctx, func(tx kv.Tx) error {
-		tcs, n, err = s.findTelegrafConfigs(ctx, tx, filter)
+		tcs, n, err = s.findTelegrafConfigs(ctx, tx, filter, opt...)
 		return err
 	})
 	return tcs, n, err
 }
 
 func (s *Service) findTelegrafConfigs(ctx context.Context, tx kv.Tx, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) {
-	tcs := make([]*influxdb.TelegrafConfig, 0)
+	var (
+		limit  = influxdb.DefaultPageSize
+		offset int
+		count  int
+		tcs    = make([]*influxdb.TelegrafConfig, 0)
+	)
+
+	if len(opt) > 0 {
+		limit = opt[0].GetLimit()
+		offset = opt[0].Offset
+	}
 
-	visit := func(k, v []byte) error {
+	visit := func(k, v []byte) (bool, error) {
 		var tc influxdb.TelegrafConfig
 		if err := json.Unmarshal(v, &tc); err != nil {
-			return err
+			return false, err
 		}
 
-		tcs = append(tcs, &tc)
+		// skip until offset reached
+		if count >= offset {
+			tcs = append(tcs, &tc)
+		}
 
-		return nil
+		count++
+
+		// stop cursing when limit is reached
+		return len(tcs) < limit, nil
 	}
 
 	if filter.OrgID == nil {

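With the options wired through above, the `visit` callback implements the page window itself: every cursor entry is counted, entries are appended only once `count` has reached the offset, and the walk stops as soon as `limit` entries have been collected. Tracing the fixture used by the test added further down:

```go
// Worked example (illustrative) for FindOptions{Limit: 2, Offset: 1} over four
// stored configs tc1..tc4 in cursor order; limit resolves to 2, offset to 1:
//
//	entry  count >= offset?  appended  keep walking (len(tcs) < limit)?
//	tc1    0 >= 1 -> no      -         yes (0 < 2)
//	tc2    1 >= 1 -> yes     tc2       yes (1 < 2)
//	tc3    2 >= 1 -> yes     tc3       no  (2 < 2), walk stops
//	tc4    never visited
//
// Result: [tc2, tc3], which is what the "find with limit and offset" test case
// below expects. The next hunk additionally caps the cursor itself at
// offset+limit entries, since no other filtering happens on this path.
```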
@@ -183,8 +198,14 @@ func (s *Service) findTelegrafConfigs(ctx context.Context, tx kv.Tx, filter infl
 		return nil, 0, err
 	}
 
-	// TODO(georgemac): convert find options into cursor options
-	cursor, err := bucket.ForwardCursor(nil)
+	// cursors do not support numeric offset
+	// but we can at least constrain the response
+	// size by the offset + limit since we are
+	// not doing any other filtering
+	// REMOVE this cursor option if you do any
+	// other filtering
+
+	cursor, err := bucket.ForwardCursor(nil, kv.WithCursorLimit(offset+limit))
 	if err != nil {
 		return nil, 0, err
 	}

@@ -364,6 +364,7 @@ func FindTelegrafConfigs(
 ) {
 	type args struct {
 		filter influxdb.TelegrafConfigFilter
+		opts   []influxdb.FindOptions
 	}
 
 	type wants struct {

@@ -518,6 +519,65 @@ func FindTelegrafConfigs(
 				telegrafConfigs: []*influxdb.TelegrafConfig{},
 			},
 		},
+		{
+			name: "find with limit and offset",
+			fields: TelegrafConfigFields{
+				IDGenerator: mock.NewIncrementingIDGenerator(oneID),
+				TelegrafConfigs: []*influxdb.TelegrafConfig{
+					{
+						ID:       oneID,
+						OrgID:    fourID,
+						Name:     "tc1",
+						Config:   "[[inputs.cpu]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+					{
+						ID:       twoID,
+						OrgID:    fourID,
+						Name:     "tc2",
+						Config:   "[[inputs.file]]\n[[inputs.mem]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+					{
+						ID:       threeID,
+						OrgID:    oneID,
+						Name:     "tc3",
+						Config:   "[[inputs.cpu]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+					{
+						ID:       fourID,
+						OrgID:    oneID,
+						Name:     "tc4",
+						Config:   "[[inputs.cpu]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+				},
+			},
+			args: args{
+				opts: []influxdb.FindOptions{
+					{Limit: 2, Offset: 1},
+				},
+			},
+			wants: wants{
+				telegrafConfigs: []*influxdb.TelegrafConfig{
+					{
+						ID:       twoID,
+						OrgID:    fourID,
+						Name:     "tc2",
+						Config:   "[[inputs.file]]\n[[inputs.mem]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+					{
+						ID:       threeID,
+						OrgID:    oneID,
+						Name:     "tc3",
+						Config:   "[[inputs.cpu]]\n",
+						Metadata: map[string]interface{}{"buckets": []interface{}{}},
+					},
+				},
+			},
+		},
 	}
 
 	for _, tt := range tests {

@@ -526,7 +586,7 @@ func FindTelegrafConfigs(
 			defer done()
 			ctx := context.Background()
 
-			tcs, _, err := s.FindTelegrafConfigs(ctx, tt.args.filter)
+			tcs, _, err := s.FindTelegrafConfigs(ctx, tt.args.filter, tt.args.opts...)
 			if err != nil && tt.wants.err == nil {
 				t.Fatalf("expected errors to be nil got '%v'", err)
 			}

@@ -3,7 +3,6 @@ package tenant
 import (
 	"context"
 	"encoding/json"
-	"errors"
 
 	"github.com/influxdata/influxdb/v2"
 	"github.com/influxdata/influxdb/v2/kv"

@@ -72,37 +71,29 @@ func (s *Store) ListURMs(ctx context.Context, tx kv.Tx, filter influxdb.UserReso
 
 	if filter.UserID.Valid() {
 		var (
-			errPageLimit = errors.New("page limit reached")
 			// urm by user index lookup
 			userID, _ = filter.UserID.Encode()
 			seen      int
 		)
 
-		if err := s.urmByUserIndex.Walk(ctx, tx, userID, func(k, v []byte) error {
+		err := s.urmByUserIndex.Walk(ctx, tx, userID, func(k, v []byte) (bool, error) {
 			m := &influxdb.UserResourceMapping{}
 			if err := json.Unmarshal(v, m); err != nil {
-				return CorruptURMError(err)
+				return false, CorruptURMError(err)
 			}
 
 			// respect offset parameter
 			reachedOffset := (len(opt) == 0 || seen >= opt[0].Offset)
-			if filterFn(m) && reachedOffset {
+			if reachedOffset && filterFn(m) {
 				ms = append(ms, m)
 			}
 
-			// respect pagination in URMs
-			if len(opt) > 0 && opt[0].Limit > 0 && len(ms) >= opt[0].Limit {
-				return errPageLimit
-			}
-
 			seen++
 
-			return nil
-		}); err != nil && err != errPageLimit {
-			return nil, err
-		}
+			return (len(opt) == 0 || opt[0].Limit <= 0 || len(ms) < opt[0].Limit), nil
+		})
 
-		return ms, nil
+		return ms, err
 	}
 
 	// for now the best we can do is use the resourceID if we have that as a forward cursor option

@@ -61,14 +61,14 @@ func newSomeResourceStore(t tester, ctx context.Context, store kv.SchemaStore) *
 
 func (s *someResourceStore) FindByOwner(ctx context.Context, ownerID string) (resources []someResource, err error) {
 	err = s.store.View(ctx, func(tx kv.Tx) error {
-		return s.ownerIDIndex.Walk(ctx, tx, []byte(ownerID), func(k, v []byte) error {
+		return s.ownerIDIndex.Walk(ctx, tx, []byte(ownerID), func(k, v []byte) (bool, error) {
 			var resource someResource
 			if err := json.Unmarshal(v, &resource); err != nil {
-				return err
+				return false, err
 			}
 
 			resources = append(resources, resource)
-			return nil
+			return true, nil
 		})
 	})
 	return

@@ -410,12 +410,12 @@ func BenchmarkIndexWalk(b *testing.B, store kv.SchemaStore, resourceCount, fetch
 
 	for i := 0; i < b.N; i++ {
 		store.View(ctx, func(tx kv.Tx) error {
-			return resourceStore.ownerIDIndex.Walk(ctx, tx, []byte(fmt.Sprintf("owner %d", i%userCount)), func(k, v []byte) error {
+			return resourceStore.ownerIDIndex.Walk(ctx, tx, []byte(fmt.Sprintf("owner %d", i%userCount)), func(k, v []byte) (bool, error) {
				if k == nil || v == nil {
 					b.Fatal("entries must not be nil")
 				}
 
-				return nil
+				return true, nil
 			})
 		})
 	}