fix(tsi1/partition/test): fix data race in test code (#25288)

pull/25334/head
WeblWabl 2024-09-11 19:48:41 -05:00 committed by GitHub
parent b233f51045
commit 7dc8b1d648
3 changed files with 84 additions and 63 deletions
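For context, the race detector trips when a test goroutine writes Partition.MaxLogFileSize while background compaction code reads it concurrently; exporting the mutex as Mu lets the test take the same lock around that write. A minimal sketch of the pattern, using stand-in types rather than the real tsi1.Partition:

// race_sketch.go - illustrative only; not the influxdb implementation.
package main

import (
	"sync"
	"time"
)

type Partition struct {
	// Mu is exported so tests can guard fields they mutate directly.
	Mu             sync.RWMutex
	MaxLogFileSize int64
}

// runPeriodicCompaction stands in for the background goroutine that
// reads MaxLogFileSize when deciding whether to compact.
func (p *Partition) runPeriodicCompaction(done <-chan struct{}) {
	t := time.NewTicker(10 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-done:
			return
		case <-t.C:
			p.Mu.RLock()
			_ = p.MaxLogFileSize // read under the lock
			p.Mu.RUnlock()
		}
	}
}

func main() {
	p := &Partition{MaxLogFileSize: 1 << 20}
	done := make(chan struct{})
	go p.runPeriodicCompaction(done)

	// Without the surrounding lock this write races with the reader
	// above, and the -race flag reports it.
	p.Mu.Lock()
	p.MaxLogFileSize = -1
	p.Mu.Unlock()

	close(done)
}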


@@ -177,7 +177,8 @@ testcase bare_last {
)
result = csv.from(csv: input)
|> testing.load()
|> range(start: -100y)
|> range(start: 2015-01-01T00:00:00Z)
|> filter(fn: (r) => r._measurement == "pge_bill" and r._field == "bank")
|> last()
|> keep(columns: ["_time", "_value", "_field", "_measurement"])


@@ -39,7 +39,8 @@ const ManifestFileName = "MANIFEST"
// Partition represents a collection of layered index files and WAL.
type Partition struct {
mu sync.RWMutex
// exported for tests
Mu sync.RWMutex
opened bool
sfile *tsdb.SeriesFile // series lookup file
@@ -150,8 +151,8 @@ var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST")
// Open opens the partition.
func (p *Partition) Open() (rErr error) {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
p.closing = make(chan struct{})
@@ -339,8 +340,8 @@ func (p *Partition) buildSeriesSet() error {
// CurrentCompactionN returns the number of compactions currently running.
func (p *Partition) CurrentCompactionN() int {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return p.currentCompactionN
}
@@ -367,8 +368,8 @@ func (p *Partition) Close() error {
p.Wait()
// Lock index and close remaining
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
var err error
@@ -402,8 +403,8 @@ func (p *Partition) SeriesFile() *tsdb.SeriesFile { return p.sfile }
// NextSequence returns the next file identifier.
func (p *Partition) NextSequence() int {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
return p.nextSequence()
}
@@ -445,8 +446,8 @@ func (p *Partition) manifest(newFileSet *FileSet) *Manifest {
// SetManifestPathForTest is only to force a bad path in testing
func (p *Partition) SetManifestPathForTest(path string) {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
p.manifestPathFn = func() string { return path }
}
@@ -457,16 +458,16 @@ func (p *Partition) WithLogger(logger *zap.Logger) {
// SetFieldSet sets a shared field set from the engine.
func (p *Partition) SetFieldSet(fs *tsdb.MeasurementFieldSet) {
p.mu.Lock()
p.Mu.Lock()
p.fieldset = fs
p.mu.Unlock()
p.Mu.Unlock()
}
// FieldSet returns the fieldset.
func (p *Partition) FieldSet() *tsdb.MeasurementFieldSet {
p.mu.Lock()
p.Mu.Lock()
fs := p.fieldset
p.mu.Unlock()
p.Mu.Unlock()
return fs
}
@@ -476,8 +477,8 @@ func (p *Partition) RetainFileSet() (*FileSet, error) {
case <-p.closing:
return nil, tsdb.ErrIndexClosing
default:
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return p.retainFileSet(), nil
}
}
@@ -490,8 +491,8 @@ func (p *Partition) retainFileSet() *FileSet {
// FileN returns the active files in the file set.
func (p *Partition) FileN() int {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return len(p.fileSet.files)
}
@@ -508,7 +509,7 @@ func (p *Partition) prependActiveLogFile() (rErr error) {
// Prepend and generate new fileset but do not yet update the partition
newFileSet := p.fileSet.PrependLogFile(f)
errors2.Capture(&rErr, func() error {
defer errors2.Capture(&rErr, func() error {
if rErr != nil {
// close the new file.
f.Close()
@@ -671,12 +672,12 @@ func (p *Partition) DropMeasurement(name []byte) error {
// Mark measurement as deleted.
entries = append(entries, LogEntry{Flag: LogEntryMeasurementTombstoneFlag, Name: name})
p.mu.RLock()
p.Mu.RLock()
if err := p.activeLogFile.Writes(entries); err != nil {
p.mu.RUnlock()
p.Mu.RUnlock()
return err
}
p.mu.RUnlock()
p.Mu.RUnlock()
// Check if the log file needs to be swapped.
if err := p.CheckLogFile(); err != nil {
@@ -704,14 +705,14 @@ func (p *Partition) createSeriesListIfNotExists(names [][]byte, tagsSlice []mode
defer fs.Release()
// Ensure fileset cannot change during insert.
p.mu.RLock()
p.Mu.RLock()
// Insert series into log file.
ids, err := p.activeLogFile.AddSeriesList(p.seriesIDSet, names, tagsSlice, tracker)
if err != nil {
p.mu.RUnlock()
p.Mu.RUnlock()
return nil, err
}
p.mu.RUnlock()
p.Mu.RUnlock()
if err := p.CheckLogFile(); err != nil {
return nil, err
@@ -722,8 +723,8 @@ func (p *Partition) createSeriesListIfNotExists(names [][]byte, tagsSlice []mode
func (p *Partition) DropSeries(seriesID uint64) error {
// Delete series from index.
if err := func() error {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return p.activeLogFile.DeleteSeriesID(seriesID)
}(); err != nil {
return err
@@ -742,8 +743,8 @@ func (p *Partition) DropSeriesList(seriesIDs []uint64) error {
// Delete series from index.
if err := func() error {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return p.activeLogFile.DeleteSeriesIDList(seriesIDs)
}(); err != nil {
return err
@@ -910,14 +911,14 @@ func (p *Partition) AssignShard(k string, shardID uint64) {}
// Compact requests a compaction of log files.
func (p *Partition) Compact() {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
p.compact()
}
func (p *Partition) DisableCompactions() {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
p.compactionsDisabled++
select {
@@ -933,8 +934,8 @@ func (p *Partition) DisableCompactions() {
}
func (p *Partition) EnableCompactions() {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
// Already enabled?
if p.compactionsEnabled() {
@@ -952,9 +953,9 @@ func (p *Partition) runPeriodicCompaction() {
p.Compact()
// Avoid a race when using Reopen in tests
p.mu.RLock()
p.Mu.RLock()
closing := p.closing
p.mu.RUnlock()
p.Mu.RUnlock()
// check for compactions once an hour (usually not necessary but a nice safety check)
t := time.NewTicker(1 * time.Hour)
@@ -977,8 +978,8 @@ func (p *Partition) runPeriodicCompaction() {
// If checkRunning = true, only count as needing a compaction if there is not a compaction already
// in progress for the level that would be compacted
func (p *Partition) NeedsCompaction(checkRunning bool) bool {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
if p.needsLogCompaction() {
return true
}
@@ -1031,10 +1032,10 @@ func (p *Partition) compact() {
p.currentCompactionN++
go func() {
p.compactLogFile(logFile)
p.mu.Lock()
p.Mu.Lock()
p.currentCompactionN--
p.levelCompacting[0] = false
p.mu.Unlock()
p.Mu.Unlock()
p.Compact()
}()
}
@@ -1074,10 +1075,10 @@ func (p *Partition) compact() {
p.compactToLevel(files, level+1, interrupt)
// Ensure compaction lock for the level is released.
p.mu.Lock()
p.Mu.Lock()
p.levelCompacting[level] = false
p.currentCompactionN--
p.mu.Unlock()
p.Mu.Unlock()
// Check for new compactions
p.Compact()
@@ -1155,8 +1156,8 @@ func (p *Partition) compactToLevel(files []*IndexFile, level int, interrupt <-ch
// Obtain lock to swap in index file and write manifest.
if err := func() (rErr error) {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
// Replace previous files with new index file.
newFileSet := p.fileSet.MustReplace(IndexFiles(files).Files(), file)
@@ -1220,8 +1221,8 @@ func (p *Partition) needsLogCompaction() bool {
func (p *Partition) CheckLogFile() error {
// Check log file under read lock.
needsCompaction := func() bool {
p.mu.RLock()
defer p.mu.RUnlock()
p.Mu.RLock()
defer p.Mu.RUnlock()
return p.needsLogCompaction()
}()
if !needsCompaction {
@@ -1229,8 +1230,8 @@ func (p *Partition) CheckLogFile() error {
}
// If file size exceeded then recheck under write lock and swap files.
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
return p.checkLogFile()
}
@@ -1260,9 +1261,9 @@ func (p *Partition) compactLogFile(logFile *LogFile) {
return
}
p.mu.Lock()
p.Mu.Lock()
interrupt := p.compactionInterrupt
p.mu.Unlock()
p.Mu.Unlock()
start := time.Now()
@@ -1312,8 +1313,8 @@ func (p *Partition) compactLogFile(logFile *LogFile) {
// Obtain lock to swap in index file and write manifest.
if err := func() (rErr error) {
p.mu.Lock()
defer p.mu.Unlock()
p.Mu.Lock()
defer p.Mu.Unlock()
// Replace previous log file with index file.
newFileSet := p.fileSet.MustReplace([]File{logFile}, file)


@@ -74,9 +74,19 @@ func TestPartition_Open(t *testing.T) {
func TestPartition_Manifest(t *testing.T) {
t.Run("current MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
t.Cleanup(func() {
if err := sfile.Close(); err != nil {
t.Fatalf("error closing series file %v", err)
}
})
p := MustOpenPartition(sfile.SeriesFile)
t.Cleanup(func() {
if err := p.Close(); err != nil {
t.Fatalf("error closing partition %v", err)
}
})
if got, exp := p.Manifest().Version, tsi1.Version; got != exp {
t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
}
@@ -98,14 +108,17 @@ func TestPartition_Manifest_Write_Fail(t *testing.T) {
func TestPartition_PrependLogFile_Write_Fail(t *testing.T) {
t.Run("write MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
t.Cleanup(func() {
if err := sfile.Close(); err != nil {
t.Fatalf("error closing series file %v", err)
}
})
p := MustOpenPartition(sfile.SeriesFile)
defer func() {
t.Cleanup(func() {
if err := p.Close(); err != nil {
t.Fatalf("error closing partition: %v", err)
}
}()
})
p.Partition.MaxLogFileSize = -1
fileN := p.FileN()
p.CheckLogFile()
@@ -124,15 +137,21 @@ func TestPartition_Compact_Write_Fail(t *testing.T) {
func TestPartition_Compact_Write_Fail(t *testing.T) {
t.Run("write MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
t.Cleanup(func() {
if err := sfile.Close(); err != nil {
t.Fatalf("error closing series file %v", err)
}
})
p := MustOpenPartition(sfile.SeriesFile)
defer func() {
t.Cleanup(func() {
if err := p.Close(); err != nil {
t.Fatalf("error closing partition: %v", err)
}
}()
})
p.Partition.Mu.Lock()
p.Partition.MaxLogFileSize = -1
p.Partition.Mu.Unlock()
fileN := p.FileN()
p.Compact()
if (1 + fileN) != p.FileN() {
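
With the test-side write to MaxLogFileSize now taken under the exported Mu, the tests touched here should run cleanly under the race detector; assuming the usual influxdb repository layout, the check is roughly:

go test -race -run TestPartition ./tsdb/index/tsi1/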