use underscore vs period, fix doc comment, add database name to CQ

pull/9477/head
Stuart Carnie 2018-02-26 10:08:43 -07:00
parent 89a88d218e
commit a74d296200
9 changed files with 26 additions and 22 deletions

2
.gitignore vendored
View File

@@ -74,3 +74,5 @@ man/*.1.gz
# test outputs
/test-results.xml
/prof

View File

@@ -13,26 +13,26 @@ const (
TraceIDKey = "trace_id"
// OperationNameKey is the logging context key used for identifying name of an operation.
OperationNameKey = "op.name"
OperationNameKey = "op_name"
// OperationEventKey is the logging context key used for identifying a notable
// event during the course of an operation.
OperationEventKey = "op.event"
OperationEventKey = "op_event"
// OperationElapsedKey is the logging context key used for identifying time elapsed to finish an operation.
OperationElapsedKey = "op.elapsed"
OperationElapsedKey = "op_elapsed"
// DBInstanceKey is the logging context key used for identifying name of the relevant database.
DBInstanceKey = "db.instance"
DBInstanceKey = "db_instance"
// DBRetentionKey is the logging context key used for identifying name of the relevant retention policy.
DBRetentionKey = "db.rp"
DBRetentionKey = "db_rp"
// DBShardGroupKey is the logging context key used for identifying relevant shard group.
DBShardGroupKey = "db.shard_group"
DBShardGroupKey = "db_shard_group"
// DBShardIDKey is the logging context key used for identifying name of the relevant shard group.
DBShardIDKey = "db.shard_id"
// DBShardIDKey is the logging context key used for identifying name of the relevant shard number.
DBShardIDKey = "db_shard_id"
)
const (
eventStart = "start"

View File

@@ -370,11 +370,12 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti
if s.loggingEnabled {
var logEnd func()
log, logEnd = logger.NewOperation(s.Logger, "Continuous query execution", "continuous_querier.execute")
log, logEnd = logger.NewOperation(s.Logger, "Continuous query execution", "continuous_querier_execute")
defer logEnd()
log.Info("Executing continuous query",
zap.String("name", cq.Info.Name),
logger.Database(cq.Database),
zap.Time("start", startTime),
zap.Time("end", endTime))
}
@@ -400,6 +401,7 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti
if s.loggingEnabled {
log.Info("Finished continuous query",
zap.String("name", cq.Info.Name),
logger.Database(cq.Database),
zap.Int64("written", written),
zap.Time("start", startTime),
zap.Time("end", endTime),

View File

@@ -80,7 +80,7 @@ func (s *Service) run() {
return
case <-ticker.C:
log, logEnd := logger.NewOperation(s.logger, "Retention policy deletion check", "retention.delete_check")
log, logEnd := logger.NewOperation(s.logger, "Retention policy deletion check", "retention_delete_check")
type deletionInfo struct {
db string

View File

@@ -1561,7 +1561,7 @@ func (e *Engine) WriteSnapshot() error {
started := time.Now()
log, logEnd := logger.NewOperation(e.logger, "Cache snapshot", "cache.snapshot")
log, logEnd := logger.NewOperation(e.logger, "Cache snapshot", "cache_snapshot")
defer func() {
elapsed := time.Since(started)
e.Cache.UpdateCompactTime(elapsed)
@@ -1888,12 +1888,12 @@ func (s *compactionStrategy) Apply() {
func (s *compactionStrategy) compactGroup() {
group := s.group
start := time.Now()
log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1.compact_group")
log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group")
defer logEnd()
log.Info("Beginning compaction", zap.Int("files", len(group)))
log.Info("Beginning compaction", zap.Int("tsm1_files", len(group)))
for i, f := range group {
log.Info("Compacting file", zap.Int("index", i), zap.String("file", f))
log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
var (
@@ -1932,7 +1932,7 @@ func (s *compactionStrategy) compactGroup() {
}
for i, f := range files {
log.Info("Compacted file", zap.Int("index", i), zap.String("file", f))
log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
log.Info("Finished compacting files",
zap.Int("groups", len(group)),
@@ -1946,7 +1946,7 @@
func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level int) *compactionStrategy {
return &compactionStrategy{
group: group,
logger: e.logger.With(zap.Int("level", level), zap.String("strategy", "level")),
logger: e.logger.With(zap.Int("tsm1_level", level), zap.String("tsm1_strategy", "level")),
fileStore: e.FileStore,
compactor: e.Compactor,
fast: fast,
@@ -1965,7 +1965,7 @@ func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level
func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *compactionStrategy {
s := &compactionStrategy{
group: group,
logger: e.logger.With(zap.String("strategy", "full"), zap.Bool("optimize", optimize)),
logger: e.logger.With(zap.String("tsm1_strategy", "full"), zap.Bool("tsm1_optimize", optimize)),
fileStore: e.FileStore,
compactor: e.Compactor,
fast: optimize,

View File

@@ -178,7 +178,7 @@ func (i *Index) Open() error {
p := NewPartition(i.sfile, filepath.Join(i.path, fmt.Sprint(j)))
p.MaxLogFileSize = i.maxLogFileSize
p.Database = i.database
p.logger = i.logger.With(zap.String("index", "tsi"), zap.String("partition", fmt.Sprint(j+1)))
p.logger = i.logger.With(zap.String("tsi1_partition", fmt.Sprint(j+1)))
i.partitions[j] = p
}

View File

@@ -887,7 +887,7 @@ func (i *Partition) compactToLevel(files []*IndexFile, level int, interrupt <-ch
assert(level > 0, "cannot compact level zero")
// Build a logger for this compaction.
log, logEnd := logger.NewOperation(i.logger, "TSI level compaction", "index.tsi.compact_to_level", zap.Int("tsi_level", level))
log, logEnd := logger.NewOperation(i.logger, "TSI level compaction", "tsi1_compact_to_level", zap.Int("tsi1_level", level))
defer logEnd()
// Check for cancellation.
@@ -1049,7 +1049,7 @@ func (i *Partition) compactLogFile(logFile *LogFile) {
assert(id != 0, "cannot parse log file id: %s", logFile.Path())
// Build a logger for this compaction.
log, logEnd := logger.NewOperation(i.logger, "TSI log compaction", "index.tsi.compact_log_file", zap.Int("log_file_id", id))
log, logEnd := logger.NewOperation(i.logger, "TSI log compaction", "tsi1_compact_log_file", zap.Int("tsi1_log_file_id", id))
defer logEnd()
// Create new index file.

View File

@@ -258,7 +258,7 @@ func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitio
// Check if we've crossed the compaction threshold.
if p.compactionsEnabled() && !p.compacting && p.CompactThreshold != 0 && p.index.InMemCount() >= uint64(p.CompactThreshold) {
p.compacting = true
log, logEnd := logger.NewOperation(p.Logger, "Series partition compaction", "series_partition.compaction", zap.String("path", p.path))
log, logEnd := logger.NewOperation(p.Logger, "Series partition compaction", "series_partition_compaction", zap.String("path", p.path))
p.wg.Add(1)
go func() {

View File

@@ -204,7 +204,7 @@ func (s *Store) loadShards() error {
s.Logger.Info("Compaction throughput limit disabled")
}
log, logEnd := logger.NewOperation(s.Logger, "Open store", "tsdb.open")
log, logEnd := logger.NewOperation(s.Logger, "Open store", "tsdb_open")
defer logEnd()
t := limiter.NewFixed(runtime.GOMAXPROCS(0))