influxdb/tsdb/index_test.go

782 lines
24 KiB
Go
Raw Normal View History

2017-12-02 23:52:34 +00:00
package tsdb_test
import (
"compress/gzip"
2017-11-16 14:44:33 +00:00
"fmt"
2022-03-31 21:17:57 +00:00
"io"
2017-12-08 17:11:07 +00:00
"os"
2017-11-16 14:44:33 +00:00
"path/filepath"
2017-12-02 23:52:34 +00:00
"reflect"
"sync"
2017-12-02 23:52:34 +00:00
"testing"
2017-11-16 14:44:33 +00:00
"github.com/influxdata/influxdb/internal"
2018-02-05 18:51:03 +00:00
"github.com/influxdata/influxdb/logger"
2017-11-16 14:44:33 +00:00
"github.com/influxdata/influxdb/models"
2017-11-17 12:43:31 +00:00
"github.com/influxdata/influxdb/pkg/slices"
"github.com/influxdata/influxdb/query"
2017-12-02 23:52:34 +00:00
"github.com/influxdata/influxdb/tsdb"
2017-11-17 12:43:31 +00:00
"github.com/influxdata/influxdb/tsdb/index/inmem"
2018-02-05 18:51:03 +00:00
"github.com/influxdata/influxdb/tsdb/index/tsi1"
2017-11-16 14:44:33 +00:00
"github.com/influxdata/influxql"
2017-12-02 23:52:34 +00:00
)
// Ensure iterator can merge multiple iterators together.
func TestMergeSeriesIDIterators(t *testing.T) {
itr := tsdb.MergeSeriesIDIterators(
tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3}),
tsdb.NewSeriesIDSliceIterator(nil),
nil,
2017-12-02 23:52:34 +00:00
tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3, 4}),
)
if e, err := itr.Next(); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 1}) {
t.Fatalf("unexpected elem(0): %#v", e)
}
if e, err := itr.Next(); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 2}) {
t.Fatalf("unexpected elem(1): %#v", e)
}
if e, err := itr.Next(); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 3}) {
t.Fatalf("unexpected elem(2): %#v", e)
}
if e, err := itr.Next(); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 4}) {
t.Fatalf("unexpected elem(3): %#v", e)
}
if e, err := itr.Next(); err != nil {
t.Fatal(err)
} else if e.SeriesID != 0 {
t.Fatalf("expected nil elem: %#v", e)
}
}
2017-12-08 17:11:07 +00:00
func TestIndexSet_MeasurementNamesByExpr(t *testing.T) {
2017-11-16 14:44:33 +00:00
// Setup indexes
indexes := map[string]*Index{}
for _, name := range tsdb.RegisteredIndexes() {
2018-02-02 13:24:49 +00:00
idx := MustOpenNewIndex(name)
2017-11-16 14:44:33 +00:00
idx.AddSeries("cpu", map[string]string{"region": "east"})
idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"})
idx.AddSeries("disk", map[string]string{"secret": "foo"})
idx.AddSeries("mem", map[string]string{"region": "west"})
idx.AddSeries("gpu", map[string]string{"region": "east"})
idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"})
indexes[name] = idx
2017-12-08 17:11:07 +00:00
defer idx.Close()
2017-11-16 14:44:33 +00:00
}
authorizer := &internal.AuthorizerMock{
AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool {
if tags.GetString("secret") != "" {
t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags)
return false
}
return true
},
}
type example struct {
name string
expr influxql.Expr
expected [][]byte
}
// These examples should be run without any auth.
examples := []example{
2017-11-17 12:43:31 +00:00
{name: "all", expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")},
{name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("cpu", "mem")},
{name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("gpu", "pci")},
{name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")},
{name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("gpu", "pci")},
2017-11-16 14:44:33 +00:00
}
// These examples should be run with the authorizer.
authExamples := []example{
2017-11-17 12:43:31 +00:00
{name: "all", expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("mem")},
{name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("gpu")},
{name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("gpu")},
2017-11-16 14:44:33 +00:00
}
for _, idx := range tsdb.RegisteredIndexes() {
t.Run(idx, func(t *testing.T) {
t.Run("no authorization", func(t *testing.T) {
for _, example := range examples {
t.Run(example.name, func(t *testing.T) {
2017-12-12 21:22:42 +00:00
names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(nil, example.expr)
2017-11-16 14:44:33 +00:00
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
2017-11-17 12:43:31 +00:00
t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
2017-11-16 14:44:33 +00:00
}
})
}
})
t.Run("with authorization", func(t *testing.T) {
for _, example := range authExamples {
t.Run(example.name, func(t *testing.T) {
2017-12-12 21:22:42 +00:00
names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(authorizer, example.expr)
2017-11-16 14:44:33 +00:00
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
2017-11-17 12:43:31 +00:00
t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
fix(services/storage): multi measurement queries return all applicable series (#19592) (#20934) This fixes multi measurement queries that go through the storage service to correctly pick up all series that apply with the filter. Previously, negative queries such as `!=`, `!~`, and predicates attempting to match empty tags did not work correctly with the storage service when multiple measurements or `OR` conditions were included. This was because these predicates would be categorized as "multiple measurements" and then it would attempt to use the field keys iterator to find the fields for each measurement. The meta queries for these did not correctly account for negative equality operators or empty tags when finding appropriate measurements and those could not be changed because it would cause a breaking change to influxql too. This modifies the storage service to use new methods that correctly account for the above situations rather than the field keys iterator. Some queries that appeared to be single measurement queries also get considered as multiple measurement queries. Any query with an `OR` condition will be considered a multiple measurement query. This bug did not apply to single measurement queries where one measurement was selected and all of the logical operators were `AND` values. This is because it used a different code path that correctly handled these situations. Backport of #19566. (cherry picked from commit ceead88bd5b1cf0d808031f02ba6a05ac1343eb9) Co-authored-by: Jonathan A. Sternberg <jonathan@influxdata.com>
2021-03-12 21:34:14 +00:00
}
})
}
})
})
}
}
func TestIndexSet_MeasurementNamesByPredicate(t *testing.T) {
// Setup indexes
indexes := map[string]*Index{}
for _, name := range tsdb.RegisteredIndexes() {
idx := MustOpenNewIndex(name)
idx.AddSeries("cpu", map[string]string{"region": "east"})
idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"})
idx.AddSeries("disk", map[string]string{"secret": "foo"})
idx.AddSeries("mem", map[string]string{"region": "west"})
idx.AddSeries("gpu", map[string]string{"region": "east"})
idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"})
indexes[name] = idx
defer idx.Close()
}
authorizer := &internal.AuthorizerMock{
AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool {
if tags.GetString("secret") != "" {
t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags)
return false
}
return true
},
}
type example struct {
name string
expr influxql.Expr
expected [][]byte
}
// These examples should be run without any auth.
examples := []example{
{name: "all", expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")},
{name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("cpu", "mem")},
{name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")},
{name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")},
{name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")},
// None of the series have this tag so all should be selected.
{name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")},
// Measurements that have this tag at all should be returned.
{name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")},
{name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")},
{name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()},
}
// These examples should be run with the authorizer.
authExamples := []example{
{name: "all", expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("mem")},
{name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "gpu")},
{name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "gpu")},
{name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")},
{name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()},
}
for _, idx := range tsdb.RegisteredIndexes() {
t.Run(idx, func(t *testing.T) {
t.Run("no authorization", func(t *testing.T) {
for _, example := range examples {
t.Run(example.name, func(t *testing.T) {
names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(nil, example.expr)
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
}
})
}
})
t.Run("with authorization", func(t *testing.T) {
for _, example := range authExamples {
t.Run(example.name, func(t *testing.T) {
names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(authorizer, example.expr)
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected))
2017-11-16 14:44:33 +00:00
}
})
}
})
})
}
}
// TestIndexSet_DedupeInmemIndexes ensures DedupeInmemIndexes collapses multiple
// ShardIndexes proxying the same underlying inmem index into a single entry,
// while leaving distinct inmem indexes and TSI indexes untouched.
func TestIndexSet_DedupeInmemIndexes(t *testing.T) {
	testCases := []struct {
		tsiN    int // Quantity of TSI indexes
		inmem1N int // Quantity of ShardIndexes proxying the first inmem Index
		inmem2N int // Quantity of ShardIndexes proxying the second inmem Index
		uniqueN int // Quantity of total, deduplicated indexes
	}{
		{tsiN: 1, inmem1N: 0, uniqueN: 1},
		{tsiN: 2, inmem1N: 0, uniqueN: 2},
		{tsiN: 0, inmem1N: 1, uniqueN: 1},
		{tsiN: 0, inmem1N: 2, uniqueN: 1},
		{tsiN: 0, inmem1N: 1, inmem2N: 1, uniqueN: 2},
		{tsiN: 0, inmem1N: 2, inmem2N: 2, uniqueN: 2},
		{tsiN: 2, inmem1N: 2, inmem2N: 2, uniqueN: 4},
	}

	for _, testCase := range testCases {
		name := fmt.Sprintf("%d/%d/%d -> %d", testCase.tsiN, testCase.inmem1N, testCase.inmem2N, testCase.uniqueN)
		t.Run(name, func(t *testing.T) {
			var indexes []tsdb.Index
			for i := 0; i < testCase.tsiN; i++ {
				idx := MustOpenNewIndex(tsi1.IndexName)
				// Close each TSI index when the subtest finishes so its
				// resources (and temp files) are released.
				defer idx.Close()
				indexes = append(indexes, idx)
			}

			if testCase.inmem1N > 0 {
				sfile := MustOpenSeriesFile()
				// Release the series file backing the first inmem index.
				defer sfile.Close()
				opts := tsdb.NewEngineOptions()
				opts.IndexVersion = inmem.IndexName
				opts.InmemIndex = inmem.NewIndex("db", sfile.SeriesFile)

				for i := 0; i < testCase.inmem1N; i++ {
					indexes = append(indexes, inmem.NewShardIndex(uint64(i), tsdb.NewSeriesIDSet(), opts))
				}
			}

			if testCase.inmem2N > 0 {
				sfile := MustOpenSeriesFile()
				// Release the series file backing the second inmem index.
				defer sfile.Close()
				opts := tsdb.NewEngineOptions()
				opts.IndexVersion = inmem.IndexName
				opts.InmemIndex = inmem.NewIndex("db", sfile.SeriesFile)

				for i := 0; i < testCase.inmem2N; i++ {
					indexes = append(indexes, inmem.NewShardIndex(uint64(i), tsdb.NewSeriesIDSet(), opts))
				}
			}

			is := tsdb.IndexSet{Indexes: indexes}.DedupeInmemIndexes()
			if len(is.Indexes) != testCase.uniqueN {
				t.Errorf("expected %d indexes, got %d", testCase.uniqueN, len(is.Indexes))
			}
		})
	}
}
2018-02-02 13:24:49 +00:00
func TestIndex_Sketches(t *testing.T) {
checkCardinalities := func(t *testing.T, index *Index, state string, series, tseries, measurements, tmeasurements int) {
t.Helper()
2018-02-02 13:24:49 +00:00
// Get sketches and check cardinality...
sketch, tsketch, err := index.SeriesSketches()
if err != nil {
t.Fatal(err)
}
// delta calculates a rough 10% delta. If i is small then a minimum value
// of 2 is used.
delta := func(i int) int {
v := i / 10
if v == 0 {
v = 2
}
return v
}
// series cardinality should be well within 10%.
if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) {
t.Errorf("[%s] got series cardinality %d, expected ~%d", state, got, exp)
}
// check series tombstones
if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) {
t.Errorf("[%s] got series tombstone cardinality %d, expected ~%d", state, got, exp)
}
// Check measurement cardinality.
if sketch, tsketch, err = index.MeasurementsSketches(); err != nil {
t.Fatal(err)
}
if got, exp := int(sketch.Count()), measurements; got != exp { //got-exp < -delta(measurements) || got-exp > delta(measurements) {
t.Errorf("[%s] got measurement cardinality %d, expected ~%d", state, got, exp)
}
if got, exp := int(tsketch.Count()), tmeasurements; got != exp { //got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) {
t.Errorf("[%s] got measurement tombstone cardinality %d, expected ~%d", state, got, exp)
}
}
test := func(t *testing.T, index string) error {
idx := MustNewIndex(index)
if index, ok := idx.Index.(*tsi1.Index); ok {
// Override the log file max size to force a log file compaction sooner.
// This way, we will test the sketches are correct when they have been
// compacted into IndexFiles, and also when they're loaded from
// IndexFiles after a re-open.
tsi1.WithMaximumLogFileSize(1 << 10)(index)
}
// Open the index
idx.MustOpen()
defer idx.Close()
series := genTestSeries(10, 5, 3)
// Add series to index.
for _, serie := range series {
if err := idx.AddSeries(serie.Measurement, serie.Tags.Map()); err != nil {
t.Fatal(err)
}
}
// Check cardinalities after adding series.
checkCardinalities(t, idx, "initial", 2430, 0, 10, 0)
// Re-open step only applies to the TSI index.
if _, ok := idx.Index.(*tsi1.Index); ok {
// Re-open the index.
if err := idx.Reopen(); err != nil {
panic(err)
}
// Check cardinalities after the reopen
checkCardinalities(t, idx, "initial|reopen", 2430, 0, 10, 0)
}
// Drop some series
if err := idx.DropMeasurement([]byte("measurement2")); err != nil {
return err
} else if err := idx.DropMeasurement([]byte("measurement5")); err != nil {
return err
}
// Check cardinalities after the delete
checkCardinalities(t, idx, "initial|reopen|delete", 2430, 486, 10, 2)
2018-02-02 13:24:49 +00:00
// Re-open step only applies to the TSI index.
if _, ok := idx.Index.(*tsi1.Index); ok {
// Re-open the index.
if err := idx.Reopen(); err != nil {
panic(err)
}
// Check cardinalities after the reopen
checkCardinalities(t, idx, "initial|reopen|delete|reopen", 2430, 486, 10, 2)
2018-02-02 13:24:49 +00:00
}
return nil
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
if err := test(t, index); err != nil {
t.Fatal(err)
}
})
}
}
// Index wraps a series file and index.
2017-11-16 14:44:33 +00:00
type Index struct {
tsdb.Index
2018-02-02 13:24:49 +00:00
rootPath string
indexType string
sfile *tsdb.SeriesFile
2017-11-16 14:44:33 +00:00
}
// EngineOption is a functional option used by tests to tweak the
// tsdb.EngineOptions an index is built with.
type EngineOption func(opts *tsdb.EngineOptions)

// DisableTSICache allows the caller to disable the TSI bitset cache during a test.
var DisableTSICache = func() EngineOption {
	return func(opts *tsdb.EngineOptions) {
		opts.Config.SeriesIDSetCacheSize = 0
	}
}
2018-02-02 13:24:49 +00:00
// MustNewIndex will initialize a new index using the provide type. It creates
// everything under the same root directory so it can be cleanly removed on Close.
//
// The index will not be opened.
func MustNewIndex(index string, eopts ...EngineOption) *Index {
2017-11-16 14:44:33 +00:00
opts := tsdb.NewEngineOptions()
opts.IndexVersion = index
for _, opt := range eopts {
opt(&opts)
}
2022-03-31 21:17:57 +00:00
rootPath, err := os.MkdirTemp("", "influxdb-tsdb")
2017-12-08 17:11:07 +00:00
if err != nil {
panic(err)
2017-11-16 14:44:33 +00:00
}
2022-03-31 21:17:57 +00:00
seriesPath, err := os.MkdirTemp(rootPath, tsdb.SeriesFileDirectory)
2017-11-16 14:44:33 +00:00
if err != nil {
panic(err)
}
2017-12-29 18:57:30 +00:00
sfile := tsdb.NewSeriesFile(seriesPath)
2017-12-08 17:11:07 +00:00
if err := sfile.Open(); err != nil {
panic(err)
}
if index == inmem.IndexName {
opts.InmemIndex = inmem.NewIndex("db0", sfile)
}
2018-02-02 13:24:49 +00:00
i, err := tsdb.NewIndex(0, "db0", filepath.Join(rootPath, "index"), tsdb.NewSeriesIDSet(), sfile, opts)
if err != nil {
panic(err)
}
2018-02-05 18:51:03 +00:00
if testing.Verbose() {
i.WithLogger(logger.New(os.Stderr))
}
2017-12-08 17:11:07 +00:00
idx := &Index{
2018-02-02 13:24:49 +00:00
Index: i,
indexType: index,
rootPath: rootPath,
sfile: sfile,
2017-12-08 17:11:07 +00:00
}
2017-11-16 14:44:33 +00:00
return idx
}
2018-02-02 13:24:49 +00:00
// MustOpenNewIndex will initialize a new index using the provide type and opens
// it.
func MustOpenNewIndex(index string, opts ...EngineOption) *Index {
idx := MustNewIndex(index, opts...)
2018-02-02 13:24:49 +00:00
idx.MustOpen()
return idx
}
// MustOpen opens the underlying index or panics.
func (i *Index) MustOpen() {
	err := i.Index.Open()
	if err != nil {
		panic(err)
	}
}
2017-12-12 21:22:42 +00:00
// IndexSet returns a tsdb.IndexSet containing just this index and its
// backing series file.
func (idx *Index) IndexSet() *tsdb.IndexSet {
	return &tsdb.IndexSet{
		Indexes:    []tsdb.Index{idx.Index},
		SeriesFile: idx.sfile,
	}
}
2017-11-16 14:44:33 +00:00
func (idx *Index) AddSeries(name string, tags map[string]string) error {
t := models.NewTags(tags)
key := fmt.Sprintf("%s,%s", name, t.HashKey())
feat: series creation ingress metrics (#20700) After turning this on and testing locally, note the 'seriesCreated' metric "localStore": {"name":"localStore","tags":null,"values":{"pointsWritten":2987,"seriesCreated":58,"valuesWritten":23754}}, "ingress": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"cq","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":4}}, "ingress:1": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"database","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:2": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"httpd","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":46}}, "ingress:3": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"ingress","rp":"monitor"},"values":{"pointsWritten":14,"seriesCreated":14,"valuesWritten":42}}, "ingress:4": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"localStore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, "ingress:5": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"queryExecutor","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":10}}, "ingress:6": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"runtime","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":30}}, "ingress:7": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"shard","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":22}}, "ingress:8": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"subscriber","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, 
"ingress:9": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_cache","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":18}}, "ingress:10": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_engine","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":58}}, "ingress:11": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_filestore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:12": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_wal","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":8}}, "ingress:13": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"write","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":18}}, "ingress:14": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"cpu","rp":"autogen"},"values":{"pointsWritten":1342,"seriesCreated":13,"valuesWritten":13420}}, "ingress:15": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"disk","rp":"autogen"},"values":{"pointsWritten":642,"seriesCreated":6,"valuesWritten":4494}}, "ingress:16": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"diskio","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":2,"valuesWritten":2354}}, "ingress:17": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"mem","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":963}}, "ingress:18": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"processes","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":856}}, "ingress:19": 
{"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"swap","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":1,"valuesWritten":642}}, "ingress:20": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"system","rp":"autogen"},"values":{"pointsWritten":321,"seriesCreated":1,"valuesWritten":749}}, Closes: https://github.com/influxdata/influxdb/issues/20613
2021-02-05 18:52:43 +00:00
return idx.CreateSeriesIfNotExists([]byte(key), []byte(name), t, tsdb.NoopStatsTracker())
2017-11-16 14:44:33 +00:00
}
2017-12-08 17:11:07 +00:00
2018-02-02 13:24:49 +00:00
// Reopen closes and re-opens the underlying index, without removing any data.
func (i *Index) Reopen() error {
	// Tear down the current index and series file handles.
	if err := i.Index.Close(); err != nil {
		return err
	}
	if err := i.sfile.Close(); err != nil {
		return err
	}

	// Re-open a fresh series file at the same path.
	i.sfile = tsdb.NewSeriesFile(i.sfile.Path())
	if err := i.sfile.Open(); err != nil {
		return err
	}

	// Rebuild engine options for the same index type as before.
	opts := tsdb.NewEngineOptions()
	opts.IndexVersion = i.indexType
	if i.indexType == inmem.IndexName {
		opts.InmemIndex = inmem.NewIndex("db0", i.sfile)
	}

	newIndex, err := tsdb.NewIndex(0, "db0", filepath.Join(i.rootPath, "index"), tsdb.NewSeriesIDSet(), i.sfile, opts)
	if err != nil {
		return err
	}
	i.Index = newIndex
	return i.Index.Open()
}
// Close closes the index cleanly and removes all on-disk data.
2017-12-08 17:11:07 +00:00
func (i *Index) Close() error {
if err := i.Index.Close(); err != nil {
return err
}
if err := i.sfile.Close(); err != nil {
return err
}
2018-02-05 18:51:03 +00:00
//return os.RemoveAll(i.rootPath)
return nil
2017-12-08 17:11:07 +00:00
}
// This benchmark compares the TagSets implementation across index types.
//
// In the case of the TSI index, TagSets has to merge results across all several
// index partitions.
//
// Typical results on an i7 laptop.
//
Reduce allocations in TSI TagSets implementation Since all tag sets are materialised to strings before this method returns, a large number of allocations can be avoided by carefully resuing buffers and containers. This commit reduces allocations by about 75%, which can be very significant for high cardinality workloads. The benchmark results shown below are for a benchmark that asks for all series keys matching `tag5=value0'. name old time/op new time/op delta Index_ConcurrentWriteQuery/inmem/queries_100000-8 5.66s ± 4% 5.70s ± 5% ~ (p=0.739 n=10+10) Index_ConcurrentWriteQuery/tsi1/queries_100000-8 26.5s ± 8% 26.8s ±12% ~ (p=0.579 n=10+10) IndexSet_TagSets/1M_series/inmem-8 11.9ms ±18% 10.4ms ± 2% -12.81% (p=0.000 n=10+10) IndexSet_TagSets/1M_series/tsi1-8 23.4ms ± 5% 18.9ms ± 1% -19.07% (p=0.000 n=10+9) name old alloc/op new alloc/op delta Index_ConcurrentWriteQuery/inmem/queries_100000-8 2.50GB ± 0% 2.50GB ± 0% ~ (p=0.315 n=10+10) Index_ConcurrentWriteQuery/tsi1/queries_100000-8 32.6GB ± 0% 32.6GB ± 0% ~ (p=0.247 n=10+10) IndexSet_TagSets/1M_series/inmem-8 3.56MB ± 0% 3.56MB ± 0% ~ (all equal) IndexSet_TagSets/1M_series/tsi1-8 12.7MB ± 0% 5.2MB ± 0% -59.02% (p=0.000 n=10+10) name old allocs/op new allocs/op delta Index_ConcurrentWriteQuery/inmem/queries_100000-8 24.0M ± 0% 24.0M ± 0% ~ (p=0.353 n=10+10) Index_ConcurrentWriteQuery/tsi1/queries_100000-8 96.6M ± 0% 96.7M ± 0% ~ (p=0.579 n=10+10) IndexSet_TagSets/1M_series/inmem-8 51.0 ± 0% 51.0 ± 0% ~ (all equal) IndexSet_TagSets/1M_series/tsi1-8 80.4k ± 0% 20.4k ± 0% -74.65% (p=0.000 n=10+10)
2018-08-09 14:59:37 +00:00
// BenchmarkIndexSet_TagSets/1M_series/inmem-8 100 10430732 ns/op 3556728 B/op 51 allocs/op
// BenchmarkIndexSet_TagSets/1M_series/tsi1-8 100 18995530 ns/op 5221180 B/op 20379 allocs/op
func BenchmarkIndexSet_TagSets(b *testing.B) {
// Read line-protocol and coerce into tsdb format.
keys := make([][]byte, 0, 1e6)
names := make([][]byte, 0, 1e6)
tags := make([]models.Tags, 0, 1e6)
// 1M series generated with:
// $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1
fd, err := os.Open("testdata/line-protocol-1M.txt.gz")
if err != nil {
b.Fatal(err)
}
gzr, err := gzip.NewReader(fd)
if err != nil {
fd.Close()
b.Fatal(err)
}
2022-03-31 21:17:57 +00:00
data, err := io.ReadAll(gzr)
if err != nil {
b.Fatal(err)
}
if err := fd.Close(); err != nil {
b.Fatal(err)
}
points, err := models.ParsePoints(data)
if err != nil {
b.Fatal(err)
}
for _, pt := range points {
keys = append(keys, pt.Key())
names = append(names, pt.Name())
tags = append(tags, pt.Tags())
}
// setup writes all of the above points to the index.
setup := func(idx *Index) {
batchSize := 10000
for j := 0; j < 1; j++ {
for i := 0; i < len(keys); i += batchSize {
k := keys[i : i+batchSize]
n := names[i : i+batchSize]
t := tags[i : i+batchSize]
feat: series creation ingress metrics (#20700) After turning this on and testing locally, note the 'seriesCreated' metric "localStore": {"name":"localStore","tags":null,"values":{"pointsWritten":2987,"seriesCreated":58,"valuesWritten":23754}}, "ingress": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"cq","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":4}}, "ingress:1": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"database","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:2": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"httpd","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":46}}, "ingress:3": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"ingress","rp":"monitor"},"values":{"pointsWritten":14,"seriesCreated":14,"valuesWritten":42}}, "ingress:4": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"localStore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, "ingress:5": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"queryExecutor","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":10}}, "ingress:6": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"runtime","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":30}}, "ingress:7": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"shard","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":22}}, "ingress:8": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"subscriber","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, 
"ingress:9": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_cache","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":18}}, "ingress:10": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_engine","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":58}}, "ingress:11": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_filestore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:12": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_wal","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":8}}, "ingress:13": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"write","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":18}}, "ingress:14": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"cpu","rp":"autogen"},"values":{"pointsWritten":1342,"seriesCreated":13,"valuesWritten":13420}}, "ingress:15": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"disk","rp":"autogen"},"values":{"pointsWritten":642,"seriesCreated":6,"valuesWritten":4494}}, "ingress:16": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"diskio","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":2,"valuesWritten":2354}}, "ingress:17": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"mem","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":963}}, "ingress:18": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"processes","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":856}}, "ingress:19": 
{"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"swap","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":1,"valuesWritten":642}}, "ingress:20": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"system","rp":"autogen"},"values":{"pointsWritten":321,"seriesCreated":1,"valuesWritten":749}}, Closes: https://github.com/influxdata/influxdb/issues/20613
2021-02-05 18:52:43 +00:00
if err := idx.CreateSeriesListIfNotExists(k, n, t, tsdb.NoopStatsTracker()); err != nil {
b.Fatal(err)
}
}
}
}
// TODO(edd): refactor how we call into tag sets in the tsdb package.
type indexTagSets interface {
TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error)
}
var errResult error
// This benchmark will merge eight bitsets each containing ~10,000 series IDs.
b.Run("1M series", func(b *testing.B) {
b.ReportAllocs()
for _, indexType := range tsdb.RegisteredIndexes() {
idx := MustOpenNewIndex(indexType)
setup(idx)
name := []byte("m4")
opt := query.IteratorOptions{Condition: influxql.MustParseExpr(`"tag5"::tag = 'value0'`)}
indexSet := tsdb.IndexSet{
SeriesFile: idx.sfile,
Indexes: []tsdb.Index{idx.Index},
} // For TSI implementation
var ts func() ([]*query.TagSet, error)
// TODO(edd): this is somewhat awkward. We should unify this difference somewhere higher
// up than the engine. I don't want to open an engine do a benchmark on
// different index implementations.
2018-08-21 13:32:30 +00:00
if indexType == tsdb.InmemIndexName {
ts = func() ([]*query.TagSet, error) {
return idx.Index.(indexTagSets).TagSets(name, opt)
}
} else {
ts = func() ([]*query.TagSet, error) {
return indexSet.TagSets(idx.sfile, name, opt)
}
}
b.Run(indexType, func(b *testing.B) {
for i := 0; i < b.N; i++ {
// Will call TagSets on the appropriate implementation.
_, errResult = ts()
if errResult != nil {
b.Fatal(err)
}
}
})
if err := idx.Close(); err != nil {
b.Fatal(err)
}
}
})
}
// This benchmark concurrently writes series to the index and fetches cached bitsets.
// The idea is to emphasize the performance difference when bitset caching is on and off.
//
// Typical results for an i7 laptop
//
// BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/cache-8 1 5963346204 ns/op 2499655768 B/op 23964183 allocs/op
// BenchmarkIndex_ConcurrentWriteQuery/inmem/queries_100000/no_cache-8 1 5314841090 ns/op 2499495280 B/op 23963322 allocs/op
// BenchmarkIndex_ConcurrentWriteQuery/tsi1/queries_100000/cache-8 1 1645048376 ns/op 2215402840 B/op 23048978 allocs/op
// BenchmarkIndex_ConcurrentWriteQuery/tsi1/queries_100000/no_cache-8 1 22242155616 ns/op 28277544136 B/op 79620463 allocs/op
func BenchmarkIndex_ConcurrentWriteQuery(b *testing.B) {
// Read line-protocol and coerce into tsdb format.
keys := make([][]byte, 0, 1e6)
names := make([][]byte, 0, 1e6)
tags := make([]models.Tags, 0, 1e6)
// 1M series generated with:
// $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1
fd, err := os.Open("testdata/line-protocol-1M.txt.gz")
if err != nil {
b.Fatal(err)
}
gzr, err := gzip.NewReader(fd)
if err != nil {
fd.Close()
b.Fatal(err)
}
2022-03-31 21:17:57 +00:00
data, err := io.ReadAll(gzr)
if err != nil {
b.Fatal(err)
}
if err := fd.Close(); err != nil {
b.Fatal(err)
}
points, err := models.ParsePoints(data)
if err != nil {
b.Fatal(err)
}
for _, pt := range points {
keys = append(keys, pt.Key())
names = append(names, pt.Name())
tags = append(tags, pt.Tags())
}
runBenchmark := func(b *testing.B, index string, queryN int, useTSICache bool) {
var idx *Index
if !useTSICache {
idx = MustOpenNewIndex(index, DisableTSICache())
} else {
idx = MustOpenNewIndex(index)
}
var wg sync.WaitGroup
begin := make(chan struct{})
// Run concurrent iterator...
runIter := func() {
keys := [][]string{
{"m0", "tag2", "value4"},
{"m1", "tag3", "value5"},
{"m2", "tag4", "value6"},
{"m3", "tag0", "value8"},
{"m4", "tag5", "value0"},
}
<-begin // Wait for writes to land
for i := 0; i < queryN/5; i++ {
for _, key := range keys {
itr, err := idx.TagValueSeriesIDIterator([]byte(key[0]), []byte(key[1]), []byte(key[2]))
if err != nil {
b.Fatal(err)
}
if itr == nil {
panic("should not happen")
}
if err := itr.Close(); err != nil {
b.Fatal(err)
}
}
}
}
batchSize := 10000
wg.Add(1)
go func() { defer wg.Done(); runIter() }()
var once sync.Once
for j := 0; j < b.N; j++ {
for i := 0; i < len(keys); i += batchSize {
k := keys[i : i+batchSize]
n := names[i : i+batchSize]
t := tags[i : i+batchSize]
feat: series creation ingress metrics (#20700) After turning this on and testing locally, note the 'seriesCreated' metric "localStore": {"name":"localStore","tags":null,"values":{"pointsWritten":2987,"seriesCreated":58,"valuesWritten":23754}}, "ingress": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"cq","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":4}}, "ingress:1": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"database","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:2": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"httpd","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":46}}, "ingress:3": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"ingress","rp":"monitor"},"values":{"pointsWritten":14,"seriesCreated":14,"valuesWritten":42}}, "ingress:4": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"localStore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, "ingress:5": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"queryExecutor","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":10}}, "ingress:6": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"runtime","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":30}}, "ingress:7": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"shard","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":22}}, "ingress:8": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"subscriber","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":6}}, 
"ingress:9": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_cache","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":18}}, "ingress:10": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_engine","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":58}}, "ingress:11": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_filestore","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":4}}, "ingress:12": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"tsm1_wal","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":2,"valuesWritten":8}}, "ingress:13": {"name":"ingress","tags":{"db":"_internal","login":"_systemuser_monitor","measurement":"write","rp":"monitor"},"values":{"pointsWritten":2,"seriesCreated":1,"valuesWritten":18}}, "ingress:14": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"cpu","rp":"autogen"},"values":{"pointsWritten":1342,"seriesCreated":13,"valuesWritten":13420}}, "ingress:15": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"disk","rp":"autogen"},"values":{"pointsWritten":642,"seriesCreated":6,"valuesWritten":4494}}, "ingress:16": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"diskio","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":2,"valuesWritten":2354}}, "ingress:17": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"mem","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":963}}, "ingress:18": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"processes","rp":"autogen"},"values":{"pointsWritten":107,"seriesCreated":1,"valuesWritten":856}}, "ingress:19": 
{"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"swap","rp":"autogen"},"values":{"pointsWritten":214,"seriesCreated":1,"valuesWritten":642}}, "ingress:20": {"name":"ingress","tags":{"db":"telegraf","login":"_systemuser_unknown","measurement":"system","rp":"autogen"},"values":{"pointsWritten":321,"seriesCreated":1,"valuesWritten":749}}, Closes: https://github.com/influxdata/influxdb/issues/20613
2021-02-05 18:52:43 +00:00
if err := idx.CreateSeriesListIfNotExists(k, n, t, tsdb.NoopStatsTracker()); err != nil {
b.Fatal(err)
}
once.Do(func() { close(begin) })
}
// Wait for queries to finish
wg.Wait()
// Reset the index...
b.StopTimer()
if err := idx.Close(); err != nil {
b.Fatal(err)
}
// Re-open everything
idx = MustOpenNewIndex(index)
wg.Add(1)
begin = make(chan struct{})
once = sync.Once{}
go func() { defer wg.Done(); runIter() }()
b.StartTimer()
}
}
queries := []int{1e5}
for _, indexType := range tsdb.RegisteredIndexes() {
b.Run(indexType, func(b *testing.B) {
for _, queryN := range queries {
b.Run(fmt.Sprintf("queries %d", queryN), func(b *testing.B) {
b.Run("cache", func(b *testing.B) {
runBenchmark(b, indexType, queryN, true)
})
b.Run("no cache", func(b *testing.B) {
runBenchmark(b, indexType, queryN, false)
})
})
}
})
}
}