From 07b87f26305eea7a562420aaa1c25e6767583c05 Mon Sep 17 00:00:00 2001 From: Mark Rushakoff Date: Mon, 19 Dec 2016 14:04:43 -0800 Subject: [PATCH] Miscellaneous lint cleanup --- cmd/influx/cli/cli.go | 6 +- cmd/influx_inspect/report/report.go | 5 - cmd/influx_tsm/tracker.go | 2 +- cmd/influxd/main.go | 10 +- internal/meta_client.go | 2 +- models/points.go | 2 +- services/continuous_querier/service.go | 2 +- services/meta/data.go | 5 +- services/meta/errors.go | 3 +- tsdb/engine/tsm1/cache.go | 2 +- tsdb/engine/tsm1/compact.go | 4 +- tsdb/engine/tsm1/engine.go | 4 +- tsdb/engine/tsm1/file_store.go | 8 +- tsdb/engine/tsm1/iterator.gen.go | 128 ++++++++++++------------- tsdb/engine/tsm1/iterator.gen.go.tmpl | 32 +++---- tsdb/engine/tsm1/timestamp.go | 4 +- tsdb/meta.go | 4 +- tsdb/shard.go | 42 ++++---- tsdb/store.go | 2 +- 19 files changed, 120 insertions(+), 147 deletions(-) diff --git a/cmd/influx/cli/cli.go b/cmd/influx/cli/cli.go index 90dd1a1397..4f59f0140d 100644 --- a/cmd/influx/cli/cli.go +++ b/cmd/influx/cli/cli.go @@ -174,7 +174,7 @@ func (c *CommandLine) Run() error { return fmt.Errorf("Failed to check token: %s", err.Error()) } if token == "" { - fmt.Printf(noTokenMsg) + fmt.Print(noTokenMsg) } fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) @@ -803,9 +803,7 @@ func (c *CommandLine) formatResults(result client.Result, separator string) []st } } - for _, column := range row.Columns { - columnNames = append(columnNames, column) - } + columnNames = append(columnNames, row.Columns...) 
// Output a line separator if we have more than one set or results and format is column if i > 0 && c.Format == "column" { diff --git a/cmd/influx_inspect/report/report.go b/cmd/influx_inspect/report/report.go index 248250c6bc..90c2b94ac0 100644 --- a/cmd/influx_inspect/report/report.go +++ b/cmd/influx_inspect/report/report.go @@ -77,11 +77,6 @@ func (cmd *Command) Run(args ...string) error { measCardinalities := map[string]*hllpp.HLLPP{} fieldCardinalities := map[string]*hllpp.HLLPP{} - ordering := make([]chan struct{}, 0, len(files)) - for range files { - ordering = append(ordering, make(chan struct{})) - } - for _, f := range files { file, err := os.OpenFile(f, os.O_RDONLY, 0600) if err != nil { diff --git a/cmd/influx_tsm/tracker.go b/cmd/influx_tsm/tracker.go index 9048aa7647..b91d9b9370 100644 --- a/cmd/influx_tsm/tracker.go +++ b/cmd/influx_tsm/tracker.go @@ -53,7 +53,7 @@ func (t *tracker) Run() error { if err != nil { log.Fatalf("Backup of database %v failed: %v\n", db, err) } - log.Printf("Database %v backed up (%v)\n", db, time.Now().Sub(start)) + log.Printf("Database %v backed up (%v)\n", db, time.Since(start)) }) } t.wg.Wait() diff --git a/cmd/influxd/main.go b/cmd/influxd/main.go index 8859d12eaf..5c20ecf65d 100644 --- a/cmd/influxd/main.go +++ b/cmd/influxd/main.go @@ -94,13 +94,9 @@ func (m *Main) Run(args ...string) error { m.Logger.Info("Listening for signals") // Block until one of the signals above is received - select { - case <-signalCh: - m.Logger.Info("Signal received, initializing clean shutdown...") - go func() { - cmd.Close() - }() - } + <-signalCh + m.Logger.Info("Signal received, initializing clean shutdown...") + go cmd.Close() // Block again until another signal is received, a shutdown timeout elapses, // or the Command is gracefully closed diff --git a/internal/meta_client.go b/internal/meta_client.go index e7bac425ca..f411dad5d2 100644 --- a/internal/meta_client.go +++ b/internal/meta_client.go @@ -87,7 +87,7 @@ func (c 
*MetaClientMock) Databases() []meta.DatabaseInfo { } func (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error { - return c.DeleteShardGroup(database, policy, id) + return c.DeleteShardGroupFn(database, policy, id) } func (c *MetaClientMock) DropContinuousQuery(database, name string) error { diff --git a/models/points.go b/models/points.go index e44b7a3224..2ba7076238 100644 --- a/models/points.go +++ b/models/points.go @@ -235,7 +235,7 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin pt, err := parsePoint(block[start:], defaultTime, precision) if err != nil { - failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err)) + failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err)) } else { points = append(points, pt) } diff --git a/services/continuous_querier/service.go b/services/continuous_querier/service.go index a5e2662595..a3b9b899ab 100644 --- a/services/continuous_querier/service.go +++ b/services/continuous_querier/service.go @@ -352,7 +352,7 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti } if s.loggingEnabled { - s.Logger.Info(fmt.Sprintf("finished continuous query %s (%v to %v) in %s", cq.Info.Name, startTime, endTime, time.Now().Sub(start))) + s.Logger.Info(fmt.Sprintf("finished continuous query %s (%v to %v) in %s", cq.Info.Name, startTime, endTime, time.Since(start))) } return nil } diff --git a/services/meta/data.go b/services/meta/data.go index d8eb415cef..e01257d65a 100644 --- a/services/meta/data.go +++ b/services/meta/data.go @@ -903,10 +903,7 @@ func (s *RetentionPolicySpec) Matches(rpi *RetentionPolicyInfo) bool { // Normalize with the retention policy info's duration instead of the spec // since they should be the same and we're performing a comparison. 
sgDuration := normalisedShardDuration(s.ShardGroupDuration, rpi.Duration) - if sgDuration != rpi.ShardGroupDuration { - return false - } - return true + return sgDuration == rpi.ShardGroupDuration } // marshal serializes to a protobuf representation. diff --git a/services/meta/errors.go b/services/meta/errors.go index 2a8b752fef..489e5eb875 100644 --- a/services/meta/errors.go +++ b/services/meta/errors.go @@ -48,8 +48,7 @@ var ( // ErrRetentionPolicyDurationTooLow is returned when updating a retention // policy that has a duration lower than the allowed minimum. - ErrRetentionPolicyDurationTooLow = errors.New(fmt.Sprintf("retention policy duration must be at least %s", - MinRetentionPolicyDuration)) + ErrRetentionPolicyDurationTooLow = fmt.Errorf("retention policy duration must be at least %s", MinRetentionPolicyDuration) // ErrRetentionPolicyConflict is returned when creating a retention policy conflicts // with an existing policy. diff --git a/tsdb/engine/tsm1/cache.go b/tsdb/engine/tsm1/cache.go index 297bb8d476..3375e99ef2 100644 --- a/tsdb/engine/tsm1/cache.go +++ b/tsdb/engine/tsm1/cache.go @@ -649,7 +649,7 @@ func (cl *CacheLoader) WithLogger(log zap.Logger) { func (c *Cache) UpdateAge() { c.mu.RLock() defer c.mu.RUnlock() - ageStat := int64(time.Now().Sub(c.lastSnapshot) / time.Millisecond) + ageStat := int64(time.Since(c.lastSnapshot) / time.Millisecond) atomic.StoreInt64(&c.stats.CacheAgeMs, ageStat) } diff --git a/tsdb/engine/tsm1/compact.go b/tsdb/engine/tsm1/compact.go index bf1ee42410..3bfde84056 100644 --- a/tsdb/engine/tsm1/compact.go +++ b/tsdb/engine/tsm1/compact.go @@ -288,7 +288,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup { generations := c.findGenerations() // first check if we should be doing a full compaction because nothing has been written in a long time - if c.CompactFullWriteColdDuration > 0 && time.Now().Sub(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 { + if 
c.CompactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 { var tsmFiles []string var genCount int for i, group := range generations { @@ -1208,7 +1208,7 @@ func (k *tsmKeyIterator) combine(dedup bool) blocks { } func (k *tsmKeyIterator) chunk(dst blocks) blocks { - for len(k.mergedValues) > k.size { + if len(k.mergedValues) > k.size { values := k.mergedValues[:k.size] cb, err := Values(values).Encode(nil) if err != nil { diff --git a/tsdb/engine/tsm1/engine.go b/tsdb/engine/tsm1/engine.go index 80e8f14e99..9842f1b021 100644 --- a/tsdb/engine/tsm1/engine.go +++ b/tsdb/engine/tsm1/engine.go @@ -842,7 +842,7 @@ func (e *Engine) WriteSnapshot() error { defer func() { if started != nil { - e.Cache.UpdateCompactTime(time.Now().Sub(*started)) + e.Cache.UpdateCompactTime(time.Since(*started)) e.logger.Info(fmt.Sprintf("Snapshot for path %s written in %v", e.path, time.Since(*started))) } }() @@ -970,7 +970,7 @@ func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool { } return sz > e.CacheFlushMemorySizeThreshold || - time.Now().Sub(lastWriteTime) > e.CacheFlushWriteColdDuration + time.Since(lastWriteTime) > e.CacheFlushWriteColdDuration } func (e *Engine) compactTSMLevel(fast bool, level int, quit <-chan struct{}) { diff --git a/tsdb/engine/tsm1/file_store.go b/tsdb/engine/tsm1/file_store.go index 73f4a36201..57e9e6c523 100644 --- a/tsdb/engine/tsm1/file_store.go +++ b/tsdb/engine/tsm1/file_store.go @@ -415,7 +415,7 @@ func (f *FileStore) Open() error { go func(idx int, file *os.File) { start := time.Now() df, err := NewTSMReader(file) - f.logger.Info(fmt.Sprintf("%s (#%d) opened in %v", file.Name(), idx, time.Now().Sub(start))) + f.logger.Info(fmt.Sprintf("%s (#%d) opened in %v", file.Name(), idx, time.Since(start))) if err != nil { readerC <- &res{r: df, err: fmt.Errorf("error opening memory map for file %s: %v", file.Name(), err)} @@ -525,10 +525,8 @@ func (f *FileStore) Replace(oldFiles, newFiles 
[]string) error { // and load the new files. We copy the pointers here to minimize // the time that locks are held as well as to ensure that the replacement // is atomic. - var updated []TSMFile - for _, t := range f.files { - updated = append(updated, t) - } + updated := make([]TSMFile, len(f.files)) + copy(updated, f.files) // Rename all the new files to make them live on restart for _, file := range newFiles { diff --git a/tsdb/engine/tsm1/iterator.gen.go b/tsdb/engine/tsm1/iterator.gen.go index 66c7c8470c..812edf0496 100644 --- a/tsdb/engine/tsm1/iterator.gen.go +++ b/tsdb/engine/tsm1/iterator.gen.go @@ -271,24 +271,22 @@ func (itr *floatLimitIterator) Stats() influxql.IteratorStats { return itr.input func (itr *floatLimitIterator) Close() error { return itr.input.Close() } func (itr *floatLimitIterator) Next() (*influxql.FloatPoint, error) { - for { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil + // Check if we are beyond the limit. + if (itr.n - itr.opt.Offset) > itr.opt.Limit { + return nil, nil } + + // Read the next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Increment counter. + itr.n++ + + // Offsets are handled by a higher level iterator so return all points. + return p, nil } // floatCursor represents an object for iterating over a single float field. @@ -714,24 +712,22 @@ func (itr *integerLimitIterator) Stats() influxql.IteratorStats { return itr.inp func (itr *integerLimitIterator) Close() error { return itr.input.Close() } func (itr *integerLimitIterator) Next() (*influxql.IntegerPoint, error) { - for { - // Check if we are beyond the limit.
- if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil + // Check if we are beyond the limit. + if (itr.n - itr.opt.Offset) > itr.opt.Limit { + return nil, nil } + + // Read the next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Increment counter. + itr.n++ + + // Offsets are handled by a higher level iterator so return all points. + return p, nil } // integerCursor represents an object for iterating over a single integer field. @@ -1157,24 +1153,22 @@ func (itr *stringLimitIterator) Stats() influxql.IteratorStats { return itr.inpu func (itr *stringLimitIterator) Close() error { return itr.input.Close() } func (itr *stringLimitIterator) Next() (*influxql.StringPoint, error) { - for { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil + // Check if we are beyond the limit. + if (itr.n - itr.opt.Offset) > itr.opt.Limit { + return nil, nil } + + // Read the next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Increment counter. + itr.n++ + + // Offsets are handled by a higher level iterator so return all points. + return p, nil } // stringCursor represents an object for iterating over a single string field. 
@@ -1600,24 +1594,22 @@ func (itr *booleanLimitIterator) Stats() influxql.IteratorStats { return itr.inp func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } func (itr *booleanLimitIterator) Next() (*influxql.BooleanPoint, error) { - for { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil + // Check if we are beyond the limit. + if (itr.n - itr.opt.Offset) > itr.opt.Limit { + return nil, nil } + + // Read the next point. + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Increment counter. + itr.n++ + + // Offsets are handled by a higher level iterator so return all points. + return p, nil } // booleanCursor represents an object for iterating over a single boolean field. diff --git a/tsdb/engine/tsm1/iterator.gen.go.tmpl b/tsdb/engine/tsm1/iterator.gen.go.tmpl index 2e1f5c0c20..a0fa177f18 100644 --- a/tsdb/engine/tsm1/iterator.gen.go.tmpl +++ b/tsdb/engine/tsm1/iterator.gen.go.tmpl @@ -267,24 +267,22 @@ func (itr *{{.name}}LimitIterator) Stats() influxql.IteratorStats { return itr.i func (itr *{{.name}}LimitIterator) Close() error { return itr.input.Close() } func (itr *{{.name}}LimitIterator) Next() (*influxql.{{.Name}}Point, error) { - for { - // Check if we are beyond the limit. - if (itr.n-itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil + // Check if we are beyond the limit. + if (itr.n-itr.opt.Offset) > itr.opt.Limit { + return nil, nil } + + // Read the next point. 
+ p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err + } + + // Increment counter. + itr.n++ + + // Offsets are handled by a higher level iterator so return all points. + return p, nil } // {{.name}}Cursor represents an object for iterating over a single {{.name}} field. diff --git a/tsdb/engine/tsm1/timestamp.go b/tsdb/engine/tsm1/timestamp.go index fc64b7f326..c0d55fe95f 100644 --- a/tsdb/engine/tsm1/timestamp.go +++ b/tsdb/engine/tsm1/timestamp.go @@ -142,7 +142,9 @@ func (e *encoder) Bytes() ([]byte, error) { func (e *encoder) encodePacked(div uint64, dts []uint64) ([]byte, error) { for _, v := range dts[1:] { - e.enc.Write(uint64(v) / div) + if err := e.enc.Write(uint64(v) / div); err != nil { + return nil, err + } } // The compressed deltas diff --git a/tsdb/meta.go b/tsdb/meta.go index 178a5af8c8..6b32f7bf53 100644 --- a/tsdb/meta.go +++ b/tsdb/meta.go @@ -1143,7 +1143,7 @@ type FilterExprs map[uint64]influxql.Expr // DeleteBoolLiteralTrues deletes all elements whose filter expression is a boolean literal true. func (fe FilterExprs) DeleteBoolLiteralTrues() { for id, expr := range fe { - if e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val == true { + if e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val { delete(fe, id) } } @@ -1262,7 +1262,7 @@ func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, // Reduce using the current tag key/value set. // Ignore it if reduces down to "false". 
e := influxql.Reduce(expr, &tagValuer{tags: m}) - if e, ok := e.(*influxql.BooleanLiteral); ok && e.Val == false { + if e, ok := e.(*influxql.BooleanLiteral); ok && !e.Val { return nil } diff --git a/tsdb/shard.go b/tsdb/shard.go index 2f436721a0..05415393c8 100644 --- a/tsdb/shard.go +++ b/tsdb/shard.go @@ -262,7 +262,7 @@ func (s *Shard) Open() error { s.engine = e - s.logger.Info(fmt.Sprintf("%s database index loaded in %s", s.path, time.Now().Sub(start))) + s.logger.Info(fmt.Sprintf("%s database index loaded in %s", s.path, time.Since(start))) go s.monitor() @@ -1169,30 +1169,28 @@ func (itr *seriesIterator) Close() error { return nil } // Next emits the next point in the iterator. func (itr *seriesIterator) Next() (*influxql.FloatPoint, error) { - for { - // Load next measurement's keys if there are no more remaining. - if itr.keys.i >= len(itr.keys.buf) { - if err := itr.nextKeys(); err != nil { - return nil, err - } - if len(itr.keys.buf) == 0 { - return nil, nil - } + // Load next measurement's keys if there are no more remaining. + if itr.keys.i >= len(itr.keys.buf) { + if err := itr.nextKeys(); err != nil { + return nil, err } - - // Read the next key. - key := itr.keys.buf[itr.keys.i] - itr.keys.i++ - - // Write auxiliary fields. - for i, f := range itr.opt.Aux { - switch f.Val { - case "key": - itr.point.Aux[i] = key - } + if len(itr.keys.buf) == 0 { + return nil, nil } - return &itr.point, nil } + + // Read the next key. + key := itr.keys.buf[itr.keys.i] + itr.keys.i++ + + // Write auxiliary fields. + for i, f := range itr.opt.Aux { + switch f.Val { + case "key": + itr.point.Aux[i] = key + } + } + return &itr.point, nil } // nextKeys reads all keys for the next measurement. 
diff --git a/tsdb/store.go b/tsdb/store.go index c6c0ce09ed..27c9bc2162 100644 --- a/tsdb/store.go +++ b/tsdb/store.go @@ -194,7 +194,7 @@ func (s *Store) loadShards() error { } resC <- &res{s: shard} - s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Now().Sub(start))) + s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Since(start))) }(s.databaseIndexes[db], db, rp.Name(), sh.Name()) } }