Use unbuffered error channels in tests

pull/7837/head
Edd Robinson 2017-01-17 10:49:53 -08:00
parent fb7388cdfc
commit feb7a2842c
7 changed files with 84 additions and 43 deletions

View File

@@ -258,7 +258,7 @@ func TestClient_Concurrent_Use(t *testing.T) {
 	wg.Add(3)
 	n := 1000
-	errC := make(chan error, 3)
+	errC := make(chan error)
 	go func() {
 		defer wg.Done()
 		bp, err := NewBatchPoints(BatchPointsConfig{})
@@ -293,11 +293,15 @@ func TestClient_Concurrent_Use(t *testing.T) {
 		}
 	}()
+	go func() {
 		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		t.Fatal(err)
+	}()
+	for err := range errC {
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }

View File

@@ -82,22 +82,27 @@ func BenchmarkLimitListener(b *testing.B) {
 	wg.Add(b.N)
 	l := httpd.LimitListener(&fakeListener{}, b.N)
-	errC := make(chan error, 1)
+	errC := make(chan error)
 	for i := 0; i < b.N; i++ {
 		go func() {
+			defer wg.Done()
 			c, err := l.Accept()
 			if err != nil {
 				errC <- err
 				return
 			}
 			c.Close()
-			wg.Done()
 		}()
 	}
-	wg.Wait()
+	go func() {
+		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		b.Fatal(err)
+	}()
+	for err := range errC {
+		if err != nil {
+			b.Error(err)
+		}
 	}
 }

View File

@@ -43,7 +43,7 @@ func TestMux(t *testing.T) {
 		mux.Logger = log.New(ioutil.Discard, "", 0)
 	}
-	errC := make(chan error, n)
+	errC := make(chan error)
 	for i := uint8(0); i < n; i++ {
 		ln := mux.Listen(byte(i))
@@ -121,14 +121,21 @@ func TestMux(t *testing.T) {
 		// Close original TCP listener and wait for all goroutines to close.
 		tcpListener.Close()
-		wg.Wait()
+		go func() {
+			wg.Wait()
 			close(errC)
-		if err := <-errC; err != nil {
-			t.Fatal(err)
+		}()
+		ok := true
+		for err := range errC {
+			if err != nil {
+				ok = false
+				t.Error(err)
+			}
 		}
-		return true
+		return ok
 	}, nil); err != nil {
 		t.Error(err)
 	}

View File

@@ -86,7 +86,7 @@ func TestCacheRace(t *testing.T) {
 		}(s)
 	}
-	errC := make(chan error, 1)
+	errC := make(chan error)
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
@@ -106,11 +106,16 @@ func TestCacheRace(t *testing.T) {
 	}()
 	close(ch)
-	wg.Wait()
+	go func() {
+		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		t.Fatal(err)
+	}()
+	for err := range errC {
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
@@ -148,7 +153,7 @@ func TestCacheRace2Compacters(t *testing.T) {
 	fileCounter := 0
 	mapFiles := map[int]bool{}
 	mu := sync.Mutex{}
-	errC := make(chan error, 1000)
+	errC := make(chan error)
 	for i := 0; i < 2; i++ {
 		wg.Add(1)
 		go func() {
@@ -187,10 +192,15 @@ func TestCacheRace2Compacters(t *testing.T) {
 		}()
 	}
 	close(ch)
-	wg.Wait()
+	go func() {
+		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		t.Fatal(err)
+	}()
+	for err := range errC {
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }

View File

@@ -822,7 +822,7 @@ func benchmarkEngine_WritePoints_Parallel(b *testing.B, batchSize int) {
 		b.StartTimer()
 		var wg sync.WaitGroup
-		errC := make(chan error, cpus)
+		errC := make(chan error)
 		for i := 0; i < cpus; i++ {
 			wg.Add(1)
 			go func(i int) {
@@ -835,11 +835,16 @@ func benchmarkEngine_WritePoints_Parallel(b *testing.B, batchSize int) {
 			}
 		}(i)
 	}
-	wg.Wait()
+	go func() {
+		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		b.Fatal(err)
+	}()
+	for err := range errC {
+		if err != nil {
+			b.Error(err)
+		}
 	}
 }
}

View File

@@ -67,7 +67,7 @@ func benchmarkRingWrite(b *testing.B, r *ring, n int) {
 	for i := 0; i < b.N; i++ {
 		var wg sync.WaitGroup
 		for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-			errC := make(chan error, n)
+			errC := make(chan error)
 			wg.Add(1)
 			go func() {
 				defer wg.Done()
@@ -77,10 +77,16 @@ func benchmarkRingWrite(b *testing.B, r *ring, n int) {
 				}
 			}
 		}()
+		go func() {
 			wg.Wait()
 			close(errC)
-		if err := <-errC; err != nil {
-			b.Fatal(err)
+		}()
+		for err := range errC {
+			if err != nil {
+				b.Error(err)
+			}
 		}
 	}
 }

View File

@@ -402,7 +402,7 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
 	var wg sync.WaitGroup
 	wg.Add(2)
-	errC := make(chan error, 2)
+	errC := make(chan error)
 	go func() {
 		defer wg.Done()
 		for i := 0; i < 50; i++ {
@@ -434,12 +434,16 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
 		}
 	}()
+	go func() {
 		wg.Wait()
 		close(errC)
-	if err := <-errC; err != nil {
-		t.Fatal(err)
-	}
+	}()
+	for err := range errC {
+		if err != nil {
+			t.Error(err)
+		}
+	}
 }
// Ensures that when a shard is closed, it removes any series meta-data // Ensures that when a shard is closed, it removes any series meta-data