Miscellaneous lint cleanup
parent f715692d7a
commit 07b87f2630
@@ -174,7 +174,7 @@ func (c *CommandLine) Run() error {
         return fmt.Errorf("Failed to check token: %s", err.Error())
     }
     if token == "" {
-        fmt.Printf(noTokenMsg)
+        fmt.Print(noTokenMsg)
     }
     fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion)
@@ -803,9 +803,7 @@ func (c *CommandLine) formatResults(result client.Result, separator string) []string {
         }
     }

-    for _, column := range row.Columns {
-        columnNames = append(columnNames, column)
-    }
+    columnNames = append(columnNames, row.Columns...)

     // Output a line separator if we have more than one set or results and format is column
     if i > 0 && c.Format == "column" {
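The replacement above relies on Go's variadic append, which appends an entire slice in a single call. A minimal standalone sketch (the variable names are illustrative, not taken from the CLI code):

package main

import "fmt"

func main() {
	columns := []string{"time", "host", "value"} // stand-in for row.Columns
	var columnNames []string

	// Equivalent to looping over columns and appending one element at a time.
	columnNames = append(columnNames, columns...)

	fmt.Println(columnNames) // [time host value]
}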
@@ -77,11 +77,6 @@ func (cmd *Command) Run(args ...string) error {
     measCardinalities := map[string]*hllpp.HLLPP{}
     fieldCardinalities := map[string]*hllpp.HLLPP{}

-    ordering := make([]chan struct{}, 0, len(files))
-    for range files {
-        ordering = append(ordering, make(chan struct{}))
-    }
-
     for _, f := range files {
         file, err := os.OpenFile(f, os.O_RDONLY, 0600)
         if err != nil {
@@ -53,7 +53,7 @@ func (t *tracker) Run() error {
             if err != nil {
                 log.Fatalf("Backup of database %v failed: %v\n", db, err)
             }
-            log.Printf("Database %v backed up (%v)\n", db, time.Now().Sub(start))
+            log.Printf("Database %v backed up (%v)\n", db, time.Since(start))
         })
     }
     t.wg.Wait()
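time.Since(t) is documented in the standard library as shorthand for time.Now().Sub(t), so the many Sub-to-Since substitutions in this commit are behavior-preserving. A small illustration (the timed work here is a placeholder):

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // placeholder for the real work

	// time.Since(start) is defined as time.Now().Sub(start).
	d1 := time.Now().Sub(start)
	d2 := time.Since(start)
	fmt.Println(d1, d2)
}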
@@ -94,13 +94,9 @@ func (m *Main) Run(args ...string) error {
     m.Logger.Info("Listening for signals")

     // Block until one of the signals above is received
-    select {
-    case <-signalCh:
-        m.Logger.Info("Signal received, initializing clean shutdown...")
-        go func() {
-            cmd.Close()
-        }()
-    }
+    <-signalCh
+    m.Logger.Info("Signal received, initializing clean shutdown...")
+    go cmd.Close()

     // Block again until another signal is received, a shutdown timeout elapses,
     // or the Command is gracefully closed
@@ -87,7 +87,7 @@ func (c *MetaClientMock) Databases() []meta.DatabaseInfo {
 }

 func (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error {
-    return c.DeleteShardGroup(database, policy, id)
+    return c.DeleteShardGroupFn(database, policy, id)
 }

 func (c *MetaClientMock) DropContinuousQuery(database, name string) error {
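The mock change above is more than style: the old body called the method itself instead of the injected DeleteShardGroupFn field, which would recurse until the stack overflowed. A minimal sketch of the function-field mock pattern (types simplified, not the real meta client interface):

package main

import "fmt"

// MetaClientMock delegates each method to a caller-supplied function field.
type MetaClientMock struct {
	DeleteShardGroupFn func(database, policy string, id uint64) error
}

// DeleteShardGroup must call the Fn field; calling itself would recurse forever.
func (c *MetaClientMock) DeleteShardGroup(database, policy string, id uint64) error {
	return c.DeleteShardGroupFn(database, policy, id)
}

func main() {
	mock := &MetaClientMock{
		DeleteShardGroupFn: func(database, policy string, id uint64) error {
			fmt.Printf("delete %s/%s shard group %d\n", database, policy, id)
			return nil
		},
	}
	_ = mock.DeleteShardGroup("db0", "autogen", 1)
}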
@@ -235,7 +235,7 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {

         pt, err := parsePoint(block[start:], defaultTime, precision)
         if err != nil {
-            failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err))
+            failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
         } else {
             points = append(points, pt)
         }
@@ -352,7 +352,7 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo, now time.Time) error {
     }

     if s.loggingEnabled {
-        s.Logger.Info(fmt.Sprintf("finished continuous query %s (%v to %v) in %s", cq.Info.Name, startTime, endTime, time.Now().Sub(start)))
+        s.Logger.Info(fmt.Sprintf("finished continuous query %s (%v to %v) in %s", cq.Info.Name, startTime, endTime, time.Since(start)))
     }
     return nil
 }
@@ -903,10 +903,7 @@ func (s *RetentionPolicySpec) Matches(rpi *RetentionPolicyInfo) bool {
     // Normalize with the retention policy info's duration instead of the spec
     // since they should be the same and we're performing a comparison.
     sgDuration := normalisedShardDuration(s.ShardGroupDuration, rpi.Duration)
-    if sgDuration != rpi.ShardGroupDuration {
-        return false
-    }
-    return true
+    return sgDuration == rpi.ShardGroupDuration
 }

 // marshal serializes to a protobuf representation.
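The Matches change collapses an if/return-false/return-true pair into returning the comparison itself, a common gosimple-style cleanup. A sketch of the pattern with placeholder values:

package main

import (
	"fmt"
	"time"
)

// Before: spell out the branch.
func matchesVerbose(a, b time.Duration) bool {
	if a != b {
		return false
	}
	return true
}

// After: return the comparison directly.
func matches(a, b time.Duration) bool {
	return a == b
}

func main() {
	fmt.Println(matchesVerbose(time.Hour, time.Hour), matches(time.Hour, time.Hour))
}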
@@ -48,8 +48,7 @@ var (

     // ErrRetentionPolicyDurationTooLow is returned when updating a retention
     // policy that has a duration lower than the allowed minimum.
-    ErrRetentionPolicyDurationTooLow = errors.New(fmt.Sprintf("retention policy duration must be at least %s",
-        MinRetentionPolicyDuration))
+    ErrRetentionPolicyDurationTooLow = fmt.Errorf("retention policy duration must be at least %s", MinRetentionPolicyDuration)

     // ErrRetentionPolicyConflict is returned when creating a retention policy conflicts
     // with an existing policy.
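errors.New(fmt.Sprintf(...)) builds the same error value as fmt.Errorf, which is why linters flag the longer form. A small comparison (the message is taken from the hunk above; the duration value is a stand-in):

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	min := time.Hour // stand-in for MinRetentionPolicyDuration

	errA := errors.New(fmt.Sprintf("retention policy duration must be at least %s", min))
	errB := fmt.Errorf("retention policy duration must be at least %s", min)

	// Both carry the same message; fmt.Errorf just skips the intermediate Sprintf.
	fmt.Println(errA.Error() == errB.Error()) // true
}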
@@ -649,7 +649,7 @@ func (cl *CacheLoader) WithLogger(log zap.Logger) {
 func (c *Cache) UpdateAge() {
     c.mu.RLock()
     defer c.mu.RUnlock()
-    ageStat := int64(time.Now().Sub(c.lastSnapshot) / time.Millisecond)
+    ageStat := int64(time.Since(c.lastSnapshot) / time.Millisecond)
     atomic.StoreInt64(&c.stats.CacheAgeMs, ageStat)
 }
@@ -288,7 +288,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
     generations := c.findGenerations()

     // first check if we should be doing a full compaction because nothing has been written in a long time
-    if c.CompactFullWriteColdDuration > 0 && time.Now().Sub(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 {
+    if c.CompactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 {
         var tsmFiles []string
         var genCount int
         for i, group := range generations {
@@ -1208,7 +1208,7 @@ func (k *tsmKeyIterator) combine(dedup bool) blocks {
 }

 func (k *tsmKeyIterator) chunk(dst blocks) blocks {
-    for len(k.mergedValues) > k.size {
+    if len(k.mergedValues) > k.size {
         values := k.mergedValues[:k.size]
         cb, err := Values(values).Encode(nil)
         if err != nil {
@@ -842,7 +842,7 @@ func (e *Engine) WriteSnapshot() error {

     defer func() {
         if started != nil {
-            e.Cache.UpdateCompactTime(time.Now().Sub(*started))
+            e.Cache.UpdateCompactTime(time.Since(*started))
             e.logger.Info(fmt.Sprintf("Snapshot for path %s written in %v", e.path, time.Since(*started)))
         }
     }()
@@ -970,7 +970,7 @@ func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool {
     }

     return sz > e.CacheFlushMemorySizeThreshold ||
-        time.Now().Sub(lastWriteTime) > e.CacheFlushWriteColdDuration
+        time.Since(lastWriteTime) > e.CacheFlushWriteColdDuration
 }

 func (e *Engine) compactTSMLevel(fast bool, level int, quit <-chan struct{}) {
@@ -415,7 +415,7 @@ func (f *FileStore) Open() error {
         go func(idx int, file *os.File) {
             start := time.Now()
             df, err := NewTSMReader(file)
-            f.logger.Info(fmt.Sprintf("%s (#%d) opened in %v", file.Name(), idx, time.Now().Sub(start)))
+            f.logger.Info(fmt.Sprintf("%s (#%d) opened in %v", file.Name(), idx, time.Since(start)))

             if err != nil {
                 readerC <- &res{r: df, err: fmt.Errorf("error opening memory map for file %s: %v", file.Name(), err)}
@@ -525,10 +525,8 @@ func (f *FileStore) Replace(oldFiles, newFiles []string) error {
     // and load the new files. We copy the pointers here to minimize
     // the time that locks are held as well as to ensure that the replacement
     // is atomic.
-    var updated []TSMFile
-    for _, t := range f.files {
-        updated = append(updated, t)
-    }
+    updated := make([]TSMFile, len(f.files))
+    copy(updated, f.files)

     // Rename all the new files to make them live on restart
     for _, file := range newFiles {
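Preallocating with make and using copy replaces the element-by-element append loop; both produce an independent slice holding the same elements, but the copy form sizes the destination exactly once. A standalone sketch with a placeholder element type (the real TSMFile is an interface in tsm1):

package main

import "fmt"

type TSMFile struct{ name string } // simplified placeholder type

func main() {
	files := []*TSMFile{{"a.tsm"}, {"b.tsm"}, {"c.tsm"}}

	// Before: grow the slice one append at a time.
	var viaAppend []*TSMFile
	for _, t := range files {
		viaAppend = append(viaAppend, t)
	}

	// After: allocate the exact length up front and copy the pointers.
	viaCopy := make([]*TSMFile, len(files))
	copy(viaCopy, files)

	fmt.Println(len(viaAppend), len(viaCopy)) // 3 3
}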
@@ -271,24 +271,22 @@ func (itr *floatLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }
 func (itr *floatLimitIterator) Close() error { return itr.input.Close() }

 func (itr *floatLimitIterator) Next() (*influxql.FloatPoint, error) {
-    for {
-        // Check if we are beyond the limit.
-        if (itr.n - itr.opt.Offset) > itr.opt.Limit {
-            return nil, nil
-        }
-
-        // Read the next point.
-        p, err := itr.input.Next()
-        if p == nil || err != nil {
-            return nil, err
-        }
-
-        // Increment counter.
-        itr.n++
-
-        // Offsets are handled by a higher level iterator so return all points.
-        return p, nil
-    }
+    // Check if we are beyond the limit.
+    if (itr.n - itr.opt.Offset) > itr.opt.Limit {
+        return nil, nil
+    }
+
+    // Read the next point.
+    p, err := itr.input.Next()
+    if p == nil || err != nil {
+        return nil, err
+    }
+
+    // Increment counter.
+    itr.n++
+
+    // Offsets are handled by a higher level iterator so return all points.
+    return p, nil
 }

 // floatCursor represents an object for iterating over a single float field.
@@ -714,24 +712,22 @@ func (itr *integerLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }
 func (itr *integerLimitIterator) Close() error { return itr.input.Close() }

 func (itr *integerLimitIterator) Next() (*influxql.IntegerPoint, error) {
-    for {
-        // Check if we are beyond the limit.
-        if (itr.n - itr.opt.Offset) > itr.opt.Limit {
-            return nil, nil
-        }
-
-        // Read the next point.
-        p, err := itr.input.Next()
-        if p == nil || err != nil {
-            return nil, err
-        }
-
-        // Increment counter.
-        itr.n++
-
-        // Offsets are handled by a higher level iterator so return all points.
-        return p, nil
-    }
+    // Check if we are beyond the limit.
+    if (itr.n - itr.opt.Offset) > itr.opt.Limit {
+        return nil, nil
+    }
+
+    // Read the next point.
+    p, err := itr.input.Next()
+    if p == nil || err != nil {
+        return nil, err
+    }
+
+    // Increment counter.
+    itr.n++
+
+    // Offsets are handled by a higher level iterator so return all points.
+    return p, nil
 }

 // integerCursor represents an object for iterating over a single integer field.
@@ -1157,24 +1153,22 @@ func (itr *stringLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }
 func (itr *stringLimitIterator) Close() error { return itr.input.Close() }

 func (itr *stringLimitIterator) Next() (*influxql.StringPoint, error) {
-    for {
-        // Check if we are beyond the limit.
-        if (itr.n - itr.opt.Offset) > itr.opt.Limit {
-            return nil, nil
-        }
-
-        // Read the next point.
-        p, err := itr.input.Next()
-        if p == nil || err != nil {
-            return nil, err
-        }
-
-        // Increment counter.
-        itr.n++
-
-        // Offsets are handled by a higher level iterator so return all points.
-        return p, nil
-    }
+    // Check if we are beyond the limit.
+    if (itr.n - itr.opt.Offset) > itr.opt.Limit {
+        return nil, nil
+    }
+
+    // Read the next point.
+    p, err := itr.input.Next()
+    if p == nil || err != nil {
+        return nil, err
+    }
+
+    // Increment counter.
+    itr.n++
+
+    // Offsets are handled by a higher level iterator so return all points.
+    return p, nil
 }

 // stringCursor represents an object for iterating over a single string field.
@@ -1600,24 +1594,22 @@ func (itr *booleanLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }
 func (itr *booleanLimitIterator) Close() error { return itr.input.Close() }

 func (itr *booleanLimitIterator) Next() (*influxql.BooleanPoint, error) {
-    for {
-        // Check if we are beyond the limit.
-        if (itr.n - itr.opt.Offset) > itr.opt.Limit {
-            return nil, nil
-        }
-
-        // Read the next point.
-        p, err := itr.input.Next()
-        if p == nil || err != nil {
-            return nil, err
-        }
-
-        // Increment counter.
-        itr.n++
-
-        // Offsets are handled by a higher level iterator so return all points.
-        return p, nil
-    }
+    // Check if we are beyond the limit.
+    if (itr.n - itr.opt.Offset) > itr.opt.Limit {
+        return nil, nil
+    }
+
+    // Read the next point.
+    p, err := itr.input.Next()
+    if p == nil || err != nil {
+        return nil, err
+    }
+
+    // Increment counter.
+    itr.n++
+
+    // Offsets are handled by a higher level iterator so return all points.
+    return p, nil
 }

 // booleanCursor represents an object for iterating over a single boolean field.
@@ -267,24 +267,22 @@ func (itr *{{.name}}LimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }
 func (itr *{{.name}}LimitIterator) Close() error { return itr.input.Close() }

 func (itr *{{.name}}LimitIterator) Next() (*influxql.{{.Name}}Point, error) {
-    for {
-        // Check if we are beyond the limit.
-        if (itr.n-itr.opt.Offset) > itr.opt.Limit {
-            return nil, nil
-        }
-
-        // Read the next point.
-        p, err := itr.input.Next()
-        if p == nil || err != nil {
-            return nil, err
-        }
-
-        // Increment counter.
-        itr.n++
-
-        // Offsets are handled by a higher level iterator so return all points.
-        return p, nil
-    }
+    // Check if we are beyond the limit.
+    if (itr.n-itr.opt.Offset) > itr.opt.Limit {
+        return nil, nil
+    }
+
+    // Read the next point.
+    p, err := itr.input.Next()
+    if p == nil || err != nil {
+        return nil, err
+    }
+
+    // Increment counter.
+    itr.n++
+
+    // Offsets are handled by a higher level iterator so return all points.
+    return p, nil
 }

 // {{.name}}Cursor represents an object for iterating over a single {{.name}} field.
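Each LimitIterator.Next above (and the template that generates them) wrapped its body in a for loop that unconditionally returns on its first pass, so dropping the loop and de-indenting is purely mechanical. A reduced sketch of the same transformation, with made-up helper names:

package main

import "fmt"

// before: the loop can never reach a second iteration.
func nextBefore(vals []int, i int) (int, bool) {
	for {
		if i >= len(vals) {
			return 0, false
		}
		return vals[i], true
	}
}

// after: identical behavior without the redundant loop.
func nextAfter(vals []int, i int) (int, bool) {
	if i >= len(vals) {
		return 0, false
	}
	return vals[i], true
}

func main() {
	vals := []int{7, 8, 9}
	fmt.Println(nextBefore(vals, 1)) // 8 true
	fmt.Println(nextAfter(vals, 1))  // 8 true
}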
@@ -142,7 +142,9 @@ func (e *encoder) Bytes() ([]byte, error) {

 func (e *encoder) encodePacked(div uint64, dts []uint64) ([]byte, error) {
     for _, v := range dts[1:] {
-        e.enc.Write(uint64(v) / div)
+        if err := e.enc.Write(uint64(v) / div); err != nil {
+            return nil, err
+        }
     }

     // The compressed deltas
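The encoder change checks the error returned by Write instead of discarding it, the usual errcheck-style fix. A minimal sketch with a hypothetical writer type, not the actual tsm1 encoder:

package main

import (
	"errors"
	"fmt"
)

// intWriter is a stand-in for the timestamp encoder's underlying writer.
type intWriter struct{ n int }

func (w *intWriter) Write(v uint64) error {
	if v == 0 {
		return errors.New("zero delta not allowed") // contrived failure case
	}
	w.n++
	return nil
}

func encodePacked(w *intWriter, div uint64, dts []uint64) error {
	for _, v := range dts[1:] {
		// Propagate the write error instead of silently dropping it.
		if err := w.Write(v / div); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	w := &intWriter{}
	fmt.Println(encodePacked(w, 10, []uint64{100, 20, 30})) // <nil>
}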
@@ -1143,7 +1143,7 @@ type FilterExprs map[uint64]influxql.Expr
 // DeleteBoolLiteralTrues deletes all elements whose filter expression is a boolean literal true.
 func (fe FilterExprs) DeleteBoolLiteralTrues() {
     for id, expr := range fe {
-        if e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val == true {
+        if e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val {
             delete(fe, id)
         }
     }
@@ -1262,7 +1262,7 @@ func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr,
     // Reduce using the current tag key/value set.
     // Ignore it if reduces down to "false".
     e := influxql.Reduce(expr, &tagValuer{tags: m})
-    if e, ok := e.(*influxql.BooleanLiteral); ok && e.Val == false {
+    if e, ok := e.(*influxql.BooleanLiteral); ok && !e.Val {
         return nil
     }
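Comparing a bool against a literal (e.Val == true, e.Val == false) is always equivalent to using the value or its negation directly, which is what the two hunks above do. A sketch with a placeholder literal type:

package main

import "fmt"

// BooleanLiteral mirrors the shape of an AST node holding a bool value.
type BooleanLiteral struct{ Val bool }

func main() {
	e := &BooleanLiteral{Val: true}

	// The pairs below are always equal.
	fmt.Println(e.Val == true, e.Val)   // true true
	fmt.Println(e.Val == false, !e.Val) // false false
}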
@@ -262,7 +262,7 @@ func (s *Shard) Open() error {

     s.engine = e

-    s.logger.Info(fmt.Sprintf("%s database index loaded in %s", s.path, time.Now().Sub(start)))
+    s.logger.Info(fmt.Sprintf("%s database index loaded in %s", s.path, time.Since(start)))

     go s.monitor()
@@ -1169,30 +1169,28 @@ func (itr *seriesIterator) Close() error { return nil }

 // Next emits the next point in the iterator.
 func (itr *seriesIterator) Next() (*influxql.FloatPoint, error) {
-    for {
-        // Load next measurement's keys if there are no more remaining.
-        if itr.keys.i >= len(itr.keys.buf) {
-            if err := itr.nextKeys(); err != nil {
-                return nil, err
-            }
-            if len(itr.keys.buf) == 0 {
-                return nil, nil
-            }
-        }
-
-        // Read the next key.
-        key := itr.keys.buf[itr.keys.i]
-        itr.keys.i++
-
-        // Write auxiliary fields.
-        for i, f := range itr.opt.Aux {
-            switch f.Val {
-            case "key":
-                itr.point.Aux[i] = key
-            }
-        }
-        return &itr.point, nil
-    }
+    // Load next measurement's keys if there are no more remaining.
+    if itr.keys.i >= len(itr.keys.buf) {
+        if err := itr.nextKeys(); err != nil {
+            return nil, err
+        }
+        if len(itr.keys.buf) == 0 {
+            return nil, nil
+        }
+    }
+
+    // Read the next key.
+    key := itr.keys.buf[itr.keys.i]
+    itr.keys.i++
+
+    // Write auxiliary fields.
+    for i, f := range itr.opt.Aux {
+        switch f.Val {
+        case "key":
+            itr.point.Aux[i] = key
+        }
+    }
+    return &itr.point, nil
 }

 // nextKeys reads all keys for the next measurement.
@@ -194,7 +194,7 @@ func (s *Store) loadShards() error {
             }

             resC <- &res{s: shard}
-            s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Now().Sub(start)))
+            s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Since(start)))
         }(s.databaseIndexes[db], db, rp.Name(), sh.Name())
     }
 }