2020-03-11 18:31:33 +00:00
|
|
|
package tenant_test
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"reflect"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2020-04-03 17:39:20 +00:00
|
|
|
"github.com/influxdata/influxdb/v2"
|
2021-08-31 20:43:45 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/kit/platform"
|
2020-04-03 17:39:20 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/kv"
|
2020-08-11 14:56:42 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/mock"
|
2020-04-03 17:39:20 +00:00
|
|
|
"github.com/influxdata/influxdb/v2/tenant"
|
2021-08-31 20:43:45 +00:00
|
|
|
itesting "github.com/influxdata/influxdb/v2/testing"
|
2020-08-11 14:56:42 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2020-03-11 18:31:33 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// type Bucket struct {
|
|
|
|
// ID ID `json:"id,omitempty"`
|
|
|
|
// OrgID ID `json:"bucketID,omitempty"`
|
|
|
|
// Type BucketType `json:"type"`
|
|
|
|
// Name string `json:"name"`
|
|
|
|
// Description string `json:"description"`
|
|
|
|
// RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources
|
|
|
|
// RetentionPeriod time.Duration `json:"retentionPeriod"`
|
|
|
|
// CRUDLog
|
|
|
|
// }
|
|
|
|
|
2020-08-11 14:56:42 +00:00
|
|
|
// Reserved bucket IDs used throughout the tests below. iota starts at 0,
// so the `+ 1` makes firstBucketID == 1, secondBucketID == 2, and so on —
// matching the IDs handed out by mock.NewIncrementingIDGenerator(1) in the
// setup helpers.
const (
	firstBucketID platform.ID = (iota + 1)
	secondBucketID
	thirdBucketID
	fourthBucketID
	fifthBucketID
)
|
|
|
|
|
2021-03-30 18:10:02 +00:00
|
|
|
// orgIDs is used to alternate generated buckets between two organizations
// (even index -> firstOrgID, odd index -> secondOrgID; see testBuckets).
// NOTE(review): firstOrgID/secondOrgID are not declared in this file —
// presumably they live in a sibling test file of this package; confirm.
var orgIDs = []platform.ID{firstOrgID, secondOrgID}
|
2020-08-11 14:56:42 +00:00
|
|
|
|
2020-03-11 18:31:33 +00:00
|
|
|
// TestBucket exercises the bucket CRUD surface of tenant.Store
// (Create/Get/GetByName/List/Update/Delete) against an in-memory KV store.
// Each scenario in the table runs an optional setup, an optional update
// (both in writable transactions), then result assertions in a read-only
// transaction.
func TestBucket(t *testing.T) {
	var (
		// Fixed "now" injected into the store (via tenant.WithNow below) so
		// CRUDLog timestamps are deterministic and comparable with
		// assert.Equal.
		aTime = time.Date(2020, 7, 23, 10, 0, 0, 0, time.UTC)

		// generate 10 buckets to test with
		// optionally provide a visit function to manipulate
		// the generated slice (for convenience)
		testBuckets = func(count int, visit ...func(*influxdb.Bucket)) (buckets []*influxdb.Bucket) {
			buckets = make([]*influxdb.Bucket, count)
			for i := range buckets {
				// Sequential IDs starting at firstBucketID (1), mirroring the
				// incrementing ID generator used in the setup helpers.
				id := firstBucketID + platform.ID(i)

				// flip-flop between (reserved_id + reserved_id+1):
				// even index -> firstOrgID (odd bucket IDs 1,3,5,...),
				// odd index -> secondOrgID (even bucket IDs 2,4,6,...).
				orgID := orgIDs[i%2]

				buckets[i] = &influxdb.Bucket{
					ID:                  id,
					OrgID:               orgID,
					Name:                fmt.Sprintf("bucket%d", int(id)),
					Description:         "words",
					RetentionPolicyName: "name",
					RetentionPeriod:     time.Second,
				}

				// Apply caller-provided mutations (e.g. withCrudLog) to each
				// generated bucket.
				for _, fn := range visit {
					fn(buckets[i])
				}
			}
			return
		}

		// withCrudLog stamps a bucket with the fixed created/updated time so
		// generated expectations match what the store persists.
		withCrudLog = func(bkt *influxdb.Bucket) {
			bkt.CRUDLog = influxdb.CRUDLog{
				CreatedAt: aTime,
				UpdatedAt: aTime,
			}
		}
	)

	// simpleSetup seeds the store with 10 buckets (IDs 1..10) split across
	// the two orgs.
	simpleSetup := func(t *testing.T, store *tenant.Store, tx kv.Tx) {
		store.BucketIDGen = mock.NewIncrementingIDGenerator(1)
		for _, bucket := range testBuckets(10) {
			err := store.CreateBucket(context.Background(), tx, bucket)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	// over20Setup seeds 24 buckets to cover listing past a 20-element
	// batch/page boundary (see the "listOver20" scenario).
	over20Setup := func(t *testing.T, store *tenant.Store, tx kv.Tx) {
		store.BucketIDGen = mock.NewIncrementingIDGenerator(1)
		for _, bucket := range testBuckets(24) {
			err := store.CreateBucket(context.Background(), tx, bucket)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	// Scenario table: setup/update run in writable transactions, results in
	// a read-only one. Any of the three may be nil.
	st := []struct {
		name    string
		setup   func(*testing.T, *tenant.Store, kv.Tx)
		update  func(*testing.T, *tenant.Store, kv.Tx)
		results func(*testing.T, *tenant.Store, kv.Tx)
	}{
		{
			// Created buckets round-trip exactly, including store-applied
			// CRUDLog timestamps.
			name:  "create",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{})
				if err != nil {
					t.Fatal(err)
				}

				if len(buckets) != 10 {
					t.Fatalf("expected 10 buckets got: %d", len(buckets))
				}

				expected := testBuckets(10, withCrudLog)
				assert.Equal(t, expected, buckets)
			},
		},
		{
			// Lookup by ID and by (org, name), plus not-found error paths.
			name:  "get",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				bucket, err := store.GetBucket(context.Background(), tx, fifthBucketID)
				assert.NoError(t, err)

				// Bucket 5 has an odd ID, so it belongs to firstOrgID.
				expected := &influxdb.Bucket{
					ID:                  fifthBucketID,
					OrgID:               firstOrgID,
					Name:                "bucket5",
					Description:         "words",
					RetentionPolicyName: "name",
					RetentionPeriod:     time.Second,
					CRUDLog: influxdb.CRUDLog{
						CreatedAt: aTime,
						UpdatedAt: aTime,
					},
				}

				assert.Equal(t, expected, bucket)

				// Name lookup is scoped to the org and returns the same bucket.
				bucket, err = store.GetBucketByName(context.Background(), tx, firstOrgID, "bucket5")
				require.NoError(t, err)
				assert.Equal(t, expected, bucket)

				// ID 11 was never created (only 10 buckets seeded).
				if _, err := store.GetBucket(context.Background(), tx, 11); err != tenant.ErrBucketNotFound {
					t.Fatal("failed to get correct error when looking for non present bucket by id")
				}

				// Unknown name yields the name-specific not-found error.
				if _, err := store.GetBucketByName(context.Background(), tx, 3, "notabucket"); err.Error() != tenant.ErrBucketNotFoundByName("notabucket").Error() {
					t.Fatal("failed to get correct error when looking for invalid bucket by name")
				}
			},
		},
		{
			// Org filtering plus Limit/Offset options over the full list.
			name:  "list",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				expected := testBuckets(10, withCrudLog)
				orgID := firstOrgID
				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{OrganizationID: &orgID})
				require.NoError(t, err)
				assert.Len(t, buckets, 5)

				// firstOrgID owns the odd-ID buckets 1,3,5,7,9 (even slice
				// indices). Their names (bucket1, bucket3, ...) happen to
				// sort lexicographically in the same order they were
				// generated.
				orgExpected := []*influxdb.Bucket{
					expected[0],
					expected[2],
					expected[4],
					expected[6],
					expected[8],
				}
				assert.Equal(t, orgExpected, buckets)

				// Limit truncates the unfiltered list to the first 4.
				buckets, err = store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{Limit: 4})
				require.NoError(t, err)

				if len(buckets) != 4 {
					t.Fatalf("expected 4 buckets got: %d", len(buckets))
				}
				if !reflect.DeepEqual(buckets, expected[:4]) {
					t.Fatalf("expected identical buckets with limit: \n%+v\n%+v", buckets, expected[:4])
				}

				// Offset skips the first 3 of 10.
				buckets, err = store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{Offset: 3})
				if err != nil {
					t.Fatal(err)
				}

				if len(buckets) != 7 {
					t.Fatalf("expected 7 buckets got: %d", len(buckets))
				}
				if !reflect.DeepEqual(buckets, expected[3:]) {
					t.Fatalf("expected identical buckets with limit: \n%+v\n%+v", buckets, expected[3:])
				}
			},
		},
		{
			// Listing more than 20 buckets returns them all (no silent
			// truncation at an internal page size).
			name:  "listOver20",
			setup: over20Setup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{})
				require.NoError(t, err)
				assert.Len(t, buckets, 24)
			},
		},
		{
			// Cursor-style pagination: walk the full list in pages of 3
			// using After, stopping once a short (final) page is returned.
			name:  "list all with limit 3 using after to paginate",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				var (
					expected = testBuckets(10, withCrudLog)
					found    []*influxdb.Bucket
					lastID   *platform.ID
					limit    = 3
					// listAfter fetches one page of `limit` buckets starting
					// after the given cursor ID (nil means from the start).
					listAfter = func(after *platform.ID) ([]*influxdb.Bucket, error) {
						return store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{
							After: after,
							Limit: limit,
						})
					}
				)

				var (
					b   []*influxdb.Bucket
					err error
				)

				for b, err = listAfter(lastID); err == nil; b, err = listAfter(lastID) {
					// Advance the cursor to the last bucket on this page.
					lastID = &b[len(b)-1].ID
					found = append(found, b...)

					// given we've seen the last page
					if len(b) < limit {
						break
					}
				}

				require.NoError(t, err)

				// All 10 buckets recovered, in order, with no duplicates.
				assert.Equal(t, expected, found)
			},
		},
		{
			// Org-scoped listing with After- and Offset-based pagination.
			name:  "list in org with pagination",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				allBuckets := testBuckets(10, withCrudLog)
				orgID := secondOrgID
				// secondOrgID owns the even-ID buckets 2,4,6,8,10 (odd slice
				// indices). Org-scoped listing appears name-ordered:
				// "bucket10" sorts lexicographically before "bucket2"
				// ('1' < '2'), so bucket 10 comes first.
				allInOrg := []*influxdb.Bucket{
					allBuckets[9], // id 10 => "bucket10", lexicographically first
					allBuckets[1],
					allBuckets[3],
					allBuckets[5],
					allBuckets[7],
				}

				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{OrganizationID: &orgID})
				require.NoError(t, err)
				require.Equal(t, allInOrg, buckets)

				// Test pagination using `after` and `limit`.
				afterBuckets, err := store.ListBuckets(
					context.Background(), tx,
					tenant.BucketFilter{OrganizationID: &orgID},
					influxdb.FindOptions{After: &allInOrg[1].ID, Limit: 2},
				)
				require.NoError(t, err)
				assert.Equal(t, allInOrg[2:4], afterBuckets)

				// Test pagination using `offset` and `limit`.
				offsetBuckets, err := store.ListBuckets(
					context.Background(), tx,
					tenant.BucketFilter{OrganizationID: &orgID},
					influxdb.FindOptions{Offset: 3, Limit: 1},
				)
				require.NoError(t, err)
				assert.Equal(t, allInOrg[3:4], offsetBuckets)
			},
		},
		{
			// Same as above but in reverse order; After/Offset pagination
			// must respect Descending.
			name:  "list descending in org with pagination",
			setup: simpleSetup,
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				allBuckets := testBuckets(10, withCrudLog)
				orgID := secondOrgID
				// Reverse name order: bucket8 > bucket6 > bucket4 > bucket2
				// > bucket10 ("bucket10" is lexicographically smallest).
				allInOrg := []*influxdb.Bucket{
					allBuckets[7],
					allBuckets[5],
					allBuckets[3],
					allBuckets[1],
					allBuckets[9], // id 10 => "bucket10", lexicographically first ascending, so last here
				}

				buckets, err := store.ListBuckets(
					context.Background(), tx,
					tenant.BucketFilter{OrganizationID: &orgID},
					influxdb.FindOptions{Descending: true},
				)
				require.NoError(t, err)
				require.Equal(t, allInOrg, buckets)

				// Test pagination using `after` and `limit`.
				afterBuckets, err := store.ListBuckets(
					context.Background(), tx,
					tenant.BucketFilter{OrganizationID: &orgID},
					influxdb.FindOptions{After: &allInOrg[1].ID, Limit: 2, Descending: true},
				)
				require.NoError(t, err)
				assert.Equal(t, allInOrg[2:4], afterBuckets)

				// Test pagination using `offset` and `limit`.
				offsetBuckets, err := store.ListBuckets(
					context.Background(), tx,
					tenant.BucketFilter{OrganizationID: &orgID},
					influxdb.FindOptions{Offset: 3, Limit: 1, Descending: true},
				)
				require.NoError(t, err)
				assert.Equal(t, allInOrg[3:4], offsetBuckets)
			},
		},
		{
			// Rename collisions are rejected; rename + description updates
			// persist.
			name:  "update",
			setup: simpleSetup,
			update: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				// Bucket 3 and bucket 5 are both in firstOrgID (odd IDs), so
				// renaming bucket3 -> "bucket5" collides within the org.
				bucket5 := "bucket5"
				_, err := store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Name: &bucket5})
				if err != tenant.ErrBucketNameNotUnique {
					t.Fatal("failed to error on duplicate bucketname")
				}

				// A unique name is accepted.
				bucket30 := "bucket30"
				_, err = store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Name: &bucket30})
				require.NoError(t, err)

				description := "notWords"
				_, err = store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Description: &description})
				require.NoError(t, err)
			},
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{})
				if err != nil {
					t.Fatal(err)
				}

				// Only bucket 3 (slice index 2) should differ from the seed.
				expected := testBuckets(10, withCrudLog)
				expected[2].Name = "bucket30"
				expected[2].Description = "notWords"
				assert.Equal(t, expected, buckets)
			},
		},
		{
			// Deletes remove the bucket; deleting again is an explicit
			// not-found error.
			name:  "delete",
			setup: simpleSetup,
			update: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				err := store.DeleteBucket(context.Background(), tx, firstBucketID)
				require.NoError(t, err)

				err = store.DeleteBucket(context.Background(), tx, firstBucketID)
				if err != tenant.ErrBucketNotFound {
					t.Fatal("invalid error when deleting bucket that has already been deleted", err)
				}

				err = store.DeleteBucket(context.Background(), tx, secondBucketID)
				require.NoError(t, err)
			},
			results: func(t *testing.T, store *tenant.Store, tx kv.Tx) {
				buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{})
				if err != nil {
					t.Fatal(err)
				}

				// Buckets 1 and 2 were deleted; 3..10 remain.
				expected := testBuckets(10, withCrudLog)[2:]
				assert.Equal(t, expected, buckets)
			},
		},
	}

	for _, testScenario := range st {
		t.Run(testScenario.name, func(t *testing.T) {
			// Fresh in-memory KV store per scenario; WithNow pins the store
			// clock to aTime so CRUDLog fields are deterministic.
			s := itesting.NewTestInmemStore(t)
			ts := tenant.NewStore(s, tenant.WithNow(func() time.Time {
				return aTime
			}))

			// setup: seed data in a writable transaction
			if testScenario.setup != nil {
				err := ts.Update(context.Background(), func(tx kv.Tx) error {
					testScenario.setup(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}

			// update: optional mutations in a second writable transaction
			if testScenario.update != nil {
				err := ts.Update(context.Background(), func(tx kv.Tx) error {
					testScenario.update(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}

			// results: assertions in a read-only transaction
			if testScenario.results != nil {
				err := ts.View(context.Background(), func(tx kv.Tx) error {
					testScenario.results(t, ts, tx)
					return nil
				})

				if err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}
|