Add more detailed logging for compactions

pull/3925/head
Paul Dix 2015-09-01 09:50:42 -04:00
parent d6cb01722b
commit 040fa060df
2 changed files with 10 additions and 2 deletions
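For reference, a minimal, self-contained sketch of the flush logging this commit adds: count every point across the series being flushed so the log line reports series, points, and bytes together. The compactionInfo struct, logFlush helper, and the [][]byte point encoding are illustrative stand-ins, not the project's actual types; only the seriesToFlush, flushSize, and countCompacting names come from the diff below.

package main

import (
	"log"
	"os"
)

// compactionInfo is a hypothetical stand-in for the WAL's per-flush bookkeeping:
// seriesToFlush maps a series key to its encoded points, flushSize is the total
// byte size of the flush, and countCompacting is how many series remain to compact.
type compactionInfo struct {
	seriesToFlush   map[string][][]byte
	flushSize       int
	countCompacting int
}

// logFlush counts every point across the series being flushed so the log line
// can report series count, point count, and byte size in one place.
func logFlush(logger *log.Logger, ftype string, partitionID uint8, c *compactionInfo) {
	pointCount := 0
	for _, points := range c.seriesToFlush {
		pointCount += len(points)
	}
	logger.Printf("Flush due to %s. Flushing %d series with %d points and %d bytes from partition %d. Compacting %d series\n",
		ftype, len(c.seriesToFlush), pointCount, c.flushSize, partitionID, c.countCompacting)
}

func main() {
	logger := log.New(os.Stderr, "", log.LstdFlags)
	c := &compactionInfo{
		seriesToFlush:   map[string][][]byte{"cpu": {{0x01}, {0x02}}, "mem": {{0x03}}},
		flushSize:       3,
		countCompacting: 2,
	}
	logFlush(logger, "memory", 1, c)
}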


@ -947,7 +947,7 @@ func MapFirst(itr Iterator) interface{} {
v = nextv
}
nextk, nextv = itr.Next()
}
}
return &firstLastMapOutput{k, v}
}


@ -1072,7 +1072,11 @@ func (p *Partition) flushAndCompact(flush flushType) error {
} else if flush == memoryFlush {
ftype = "memory"
}
p.log.logger.Printf("Flush due to %s. Flushing %d series with %d bytes from partition %d. Compacting %d series\n", ftype, len(c.seriesToFlush), c.flushSize, p.id, c.countCompacting)
pointCount := 0
for _, a := range c.seriesToFlush {
pointCount += len(a)
}
p.log.logger.Printf("Flush due to %s. Flushing %d series with %d points and %d bytes from partition %d. Compacting %d series\n", ftype, len(c.seriesToFlush), pointCount, c.flushSize, p.id, c.countCompacting)
}
// write the data to the index first
@ -1080,6 +1084,9 @@ func (p *Partition) flushAndCompact(flush flushType) error {
// if we can't write the index, we should just bring down the server hard
panic(fmt.Sprintf("error writing the wal to the index: %s", err.Error()))
}
if p.log.EnableLogging {
p.log.logger.Printf("write to index took of partition %d took %s\n", p.id, time.Since(startTime))
}
// clear the flush cache and reset the memory thresholds
p.mu.Lock()
@ -1094,6 +1101,7 @@ func (p *Partition) flushAndCompact(flush flushType) error {
p.mu.Unlock()
}()
startTime = time.Now()
err = p.compactFiles(c, flush)
if p.log.EnableLogging {
p.log.logger.Printf("compaction of partition %d took %s\n", p.id, time.Since(startTime))