Fixing more tests.

pull/9150/head
Ben Johnson 2017-12-04 10:29:04 -07:00
parent e0df47d54f
commit f5f85d65f9
No known key found for this signature in database
GPG Key ID: 81741CD251883081
4 changed files with 26 additions and 27 deletions

View File

@ -9,6 +9,7 @@ import (
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
@ -27,11 +28,6 @@ func TestMain(m *testing.M) {
verboseServerLogs = *vv
var r int
for _, indexType = range tsdb.RegisteredIndexes() {
if indexType != "tsi1" {
println("dbg/skipping", indexType)
continue
}
// Setup benchmark server
c := NewConfig()
c.Retention.Enabled = false
@ -6360,13 +6356,14 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
},
}...)
for i, query := range test.queries {
var once sync.Once
for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
if i == 0 {
once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
})
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}
@ -8100,13 +8097,14 @@ func TestServer_Query_ShowTagValues(t *testing.T) {
},
}...)
for i, query := range test.queries {
var once sync.Once
for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
if i == 0 {
once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
})
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}

View File

@ -6,6 +6,7 @@ import (
"fmt"
"os"
"regexp"
"runtime/debug"
"sort"
"sync"
@ -502,11 +503,13 @@ func (itr *seriesIDUnionIterator) Next() (_ SeriesIDElem, err error) {
}
// Return non-zero or lesser series.
if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; b == 0 || a < b {
if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a == 0 && b == 0 {
return SeriesIDElem{}, nil
} else if b == 0 || (a != 0 && a < b) {
elem := itr.buf[0]
itr.buf[0].SeriesID = 0
return elem, nil
} else if a == 0 || a > b {
} else if a == 0 || (b != 0 && a > b) {
elem := itr.buf[1]
itr.buf[1].SeriesID = 0
return elem, nil
@ -640,6 +643,7 @@ func NewSeriesPointIterator(sfile *SeriesFile, indexSet IndexSet, fieldset *Meas
indexSet: indexSet,
fieldset: fieldset,
mitr: mitr,
sfile: sfile,
point: query.FloatPoint{
Aux: make([]interface{}, len(opt.Aux)),
},
@ -714,6 +718,8 @@ func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
itr.point.Aux[i] = key
}
}
return &itr.point, nil
}
}
@ -1296,7 +1302,7 @@ func (is IndexSet) matchTagValueEqualEmptySeriesIDIterator(name, key []byte, val
e, err := vitr.Next()
if err != nil {
return err
} else if e != nil {
} else if e == nil {
break
}
@ -1338,7 +1344,7 @@ func (is IndexSet) matchTagValueEqualNotEmptySeriesIDIterator(name, key []byte,
if err != nil {
SeriesIDIterators(itrs).Close()
return nil, err
} else if e != nil {
} else if e == nil {
break
}
@ -1369,7 +1375,7 @@ func (is IndexSet) matchTagValueNotEqualEmptySeriesIDIterator(name, key []byte,
if err != nil {
SeriesIDIterators(itrs).Close()
return nil, err
} else if e != nil {
} else if e == nil {
break
}
@ -1400,7 +1406,7 @@ func (is IndexSet) matchTagValueNotEqualNotEmptySeriesIDIterator(name, key []byt
if err != nil {
SeriesIDIterators(itrs).Close()
return nil, err
} else if e != nil {
} else if e == nil {
break
}
if value.Match(e) {
@ -1750,3 +1756,7 @@ type byTagKey []*query.TagSet
// Len reports the number of tag sets; part of sort.Interface for byTagKey.
func (t byTagKey) Len() int { return len(t) }
// Less orders tag sets lexicographically by their raw Key bytes.
func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 }
// Swap exchanges the tag sets at i and j; part of sort.Interface.
func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
// stack returns the current goroutine's stack trace framed by dashed
// separator lines, for ad-hoc debug output.
func stack() string {
	const sep = "------------------------\n"
	trace := string(debug.Stack())
	return sep + trace + sep + "\n"
}

View File

@ -736,15 +736,6 @@ func (i *Partition) UnassignShard(k string, shardID uint64, ts int64) error {
return i.DropSeries([]byte(k), ts)
}
/*
// seriesIDIterator returns an iterator over all matching series.
func (i *Partition) seriesPointIterator(opt query.IteratorOptions) tsdb.SeriesIDIterator {
// NOTE: The iterator handles releasing the file set.
fs := i.RetainFileSet()
return newSeriesPointIterator(fs, i.fieldset, opt), nil
}
*/
// Compact requests a compaction of log files.
func (i *Partition) Compact() {
i.mu.Lock()

View File

@ -1266,7 +1266,7 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
return e
}), nil)
// Get set of Shards to work on.
// Build index set to work on.
shards := make([]*Shard, 0, len(shardIDs))
s.mu.RLock()
for _, sid := range shardIDs {