From a95647b720f68d9f5993ab505d1233064b984d65 Mon Sep 17 00:00:00 2001
From: Joe LeGasse
Date: Wed, 30 Aug 2017 09:26:21 -0400
Subject: [PATCH] cleanup: remove poor usage of ',ok' with maps

There are several places in the code where comma-ok map retrieval was
being used poorly. Some were benign, like checking existence before
issuing an unconditional delete with no cleanup. Others were potentially
far more serious: assuming that if 'ok' was true, then the resulting
pointer retrieved from the map would be non-nil. `nil` is a perfectly
valid value to store in a map of pointers, and the comma-ok syntax is
meant for when membership is distinct from having a non-zero value.
There were only one or two cases that I saw being used correctly for
maps of pointers.
---
 cmd/influx_inspect/report/report.go    | 12 ++++++------
 influxql/ast.go                        |  4 +---
 influxql/parse_tree.go                 |  6 +++---
 influxql/parser.go                     |  4 ++--
 query/task_manager.go                  | 30 ++++++++++--------------------
 services/continuous_querier/service.go |  4 +---
 services/httpd/handler.go              |  2 +-
 services/httpd/handler_test.go         |  4 ++--
 services/httpd/requests.go             |  6 +++---
 services/meta/data.go                  |  4 ++--
 tsdb/engine/tsm1/cache.go              | 14 +++++++-------
 tsdb/engine/tsm1/cache_test.go         |  4 ++--
 tsdb/engine/tsm1/ring.go               | 14 +++++++-------
 tsdb/index/inmem/inmem.go              |  2 +-
 tsdb/index/inmem/meta.go               | 32 +++++++++++++-------------------
 15 files changed, 63 insertions(+), 79 deletions(-)

diff --git a/cmd/influx_inspect/report/report.go b/cmd/influx_inspect/report/report.go
index 86e9f5f2d2..5e1a0b8075 100644
--- a/cmd/influx_inspect/report/report.go
+++ b/cmd/influx_inspect/report/report.go
@@ -104,23 +104,23 @@ func (cmd *Command) Run(args ...string) error {
 			seriesKey, field := key[:sep], key[sep+4:]
 			measurement, tags := models.ParseKey(seriesKey)
 
-			measCount, ok := measCardinalities[measurement]
-			if !ok {
+			measCount := measCardinalities[measurement]
+			if measCount == nil {
 				measCount = hllpp.New()
 				measCardinalities[measurement] = measCount
 			}
 			measCount.Add([]byte(key))
 
-			fieldCount, ok := fieldCardinalities[measurement]
-			if !ok {
+			fieldCount := fieldCardinalities[measurement]
+			if fieldCount == nil {
 				fieldCount = hllpp.New()
 				fieldCardinalities[measurement] = fieldCount
 			}
 			fieldCount.Add([]byte(field))
 
 			for _, t := range tags {
-				tagCount, ok := tagCardinalities[string(t.Key)]
-				if !ok {
+				tagCount := tagCardinalities[string(t.Key)]
+				if tagCount == nil {
 					tagCount = hllpp.New()
 					tagCardinalities[string(t.Key)] = tagCount
 				}
diff --git a/influxql/ast.go b/influxql/ast.go
index 9eb9a5871f..d841c033b5 100644
--- a/influxql/ast.go
+++ b/influxql/ast.go
@@ -1141,9 +1141,7 @@ func (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error)
 		for _, d := range other.Dimensions {
 			switch expr := d.Expr.(type) {
 			case *VarRef:
-				if _, ok := dimensionSet[expr.Val]; ok {
-					delete(dimensionSet, expr.Val)
-				}
+				delete(dimensionSet, expr.Val)
 			}
 		}
 	}
diff --git a/influxql/parse_tree.go b/influxql/parse_tree.go
index 1c78ca686a..b7769b6e0b 100644
--- a/influxql/parse_tree.go
+++ b/influxql/parse_tree.go
@@ -21,7 +21,7 @@ func (t *ParseTree) With(fn func(*ParseTree)) {
 func (t *ParseTree) Group(tokens ...Token) *ParseTree {
 	for _, tok := range tokens {
 		// Look for the parse tree for this token.
-		if subtree, ok := t.Tokens[tok]; ok {
+		if subtree := t.Tokens[tok]; subtree != nil {
 			t = subtree
 			continue
 		}
@@ -67,12 +67,12 @@ func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) {
 func (t *ParseTree) Parse(p *Parser) (Statement, error) {
 	for {
 		tok, pos, lit := p.ScanIgnoreWhitespace()
-		if subtree, ok := t.Tokens[tok]; ok {
+		if subtree := t.Tokens[tok]; subtree != nil {
 			t = subtree
 			continue
 		}
 
-		if stmt, ok := t.Handlers[tok]; ok {
+		if stmt := t.Handlers[tok]; stmt != nil {
 			return stmt(p)
 		}
 
diff --git a/influxql/parser.go b/influxql/parser.go
index f0f37b05fc..d33e5bbaba 100644
--- a/influxql/parser.go
+++ b/influxql/parser.go
@@ -2638,8 +2638,8 @@ func (p *Parser) parseUnaryExpr() (Expr, error) {
 			return nil, errors.New("empty bound parameter")
 		}
 
-		v, ok := p.params[k]
-		if !ok {
+		v := p.params[k]
+		if v == nil {
 			return nil, fmt.Errorf("missing parameter: %s", k)
 		}
 
diff --git a/query/task_manager.go b/query/task_manager.go
index c01d52c42e..f0b789720d 100644
--- a/query/task_manager.go
+++ b/query/task_manager.go
@@ -113,11 +113,13 @@ func (t *TaskManager) executeShowQueriesStatement(q *influxql.ShowQueriesStateme
 	}}, nil
 }
 
-func (t *TaskManager) query(qid uint64) (*QueryTask, bool) {
+func (t *TaskManager) queryError(qid uint64, err error) {
 	t.mu.RLock()
-	query, ok := t.queries[qid]
+	query := t.queries[qid]
 	t.mu.RUnlock()
-	return query, ok
+	if query != nil {
+		query.setError(err)
+	}
 }
 
 // AttachQuery attaches a running query to be managed by the TaskManager.
@@ -174,8 +176,8 @@ func (t *TaskManager) KillQuery(qid uint64) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
-	query, ok := t.queries[qid]
-	if !ok {
+	query := t.queries[qid]
+	if query == nil {
 		return fmt.Errorf("no such query id: %d", qid)
 	}
 
@@ -220,27 +222,15 @@ func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closin
 
 	select {
 	case <-closing:
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(ErrQueryInterrupted)
+		t.queryError(qid, ErrQueryInterrupted)
 	case err := <-monitorCh:
 		if err == nil {
 			break
 		}
 
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(err)
+		t.queryError(qid, err)
 	case <-timerCh:
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(ErrQueryTimeoutLimitExceeded)
+		t.queryError(qid, ErrQueryTimeoutLimitExceeded)
 	case <-interrupt:
 		// Query was manually closed so exit the select.
 		return
diff --git a/services/continuous_querier/service.go b/services/continuous_querier/service.go
index d5f77e3874..abfb8197be 100644
--- a/services/continuous_querier/service.go
+++ b/services/continuous_querier/service.go
@@ -198,9 +198,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
 			if name == "" || cq.Name == name {
 				// Remove the last run time for the CQ
 				id := fmt.Sprintf("%s%s%s", db.Name, idDelimiter, cq.Name)
-				if _, ok := s.lastRuns[id]; ok {
-					delete(s.lastRuns, id)
-				}
+				delete(s.lastRuns, id)
 			}
 		}
 	}
diff --git a/services/httpd/handler.go b/services/httpd/handler.go
index e3a073248b..b835b7c270 100644
--- a/services/httpd/handler.go
+++ b/services/httpd/handler.go
@@ -779,7 +779,7 @@ func (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 
 	first := true
-	if val, ok := diags["system"]; ok {
+	if val := diags["system"]; val != nil {
 		jv, err := parseSystemDiagnostics(val)
 		if err != nil {
 			h.httpError(w, err.Error(), http.StatusInternalServerError)
diff --git a/services/httpd/handler_test.go b/services/httpd/handler_test.go
index 149a2b0ab9..530734d295 100644
--- a/services/httpd/handler_test.go
+++ b/services/httpd/handler_test.go
@@ -575,7 +575,7 @@ func TestHandler_Version(t *testing.T) {
 	for _, test := range tests {
 		w := httptest.NewRecorder()
 		h.ServeHTTP(w, MustNewRequest(test.method, test.endpoint, test.body))
-		if v, ok := w.HeaderMap["X-Influxdb-Version"]; ok {
+		if v := w.HeaderMap["X-Influxdb-Version"]; len(v) > 0 {
 			if v[0] != "0.0.0" {
 				t.Fatalf("unexpected version: %s", v)
 			}
@@ -583,7 +583,7 @@ func TestHandler_Version(t *testing.T) {
 			t.Fatalf("Header entry 'X-Influxdb-Version' not present")
 		}
 
-		if v, ok := w.HeaderMap["X-Influxdb-Build"]; ok {
+		if v := w.HeaderMap["X-Influxdb-Build"]; len(v) > 0 {
 			if v[0] != "OSS" {
 				t.Fatalf("unexpected BuildType: %s", v)
 			}
diff --git a/services/httpd/requests.go b/services/httpd/requests.go
index 8fee1c06cc..23459947aa 100644
--- a/services/httpd/requests.go
+++ b/services/httpd/requests.go
@@ -47,16 +47,16 @@ func (p *RequestProfile) AddQuery(info RequestInfo) {
 func (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {
 	// Look for a request entry for this request.
 	p.mu.RLock()
-	st, ok := p.Requests[info]
+	st := p.Requests[info]
 	p.mu.RUnlock()
-	if ok {
+	if st != nil {
 		fn(st)
 		return
 	}
 
 	// There is no entry in the request tracker. Create one.
 	p.mu.Lock()
-	if st, ok := p.Requests[info]; ok {
+	if st := p.Requests[info]; st != nil {
 		// Something else created this entry while we were waiting for the lock.
 		p.mu.Unlock()
 		fn(st)
diff --git a/services/meta/data.go b/services/meta/data.go
index d4b0b7fe8c..a48793f19b 100644
--- a/services/meta/data.go
+++ b/services/meta/data.go
@@ -1557,8 +1557,8 @@ func (leases *Leases) Acquire(name string, nodeID uint64) (*Lease, error) {
 	leases.mu.Lock()
 	defer leases.mu.Unlock()
 
-	l, ok := leases.m[name]
-	if ok {
+	l := leases.m[name]
+	if l != nil {
 		if time.Now().After(l.Expiration) || l.Owner == nodeID {
 			l.Expiration = time.Now().Add(leases.d)
 			l.Owner = nodeID
diff --git a/tsdb/engine/tsm1/cache.go b/tsdb/engine/tsm1/cache.go
index cd0efcb290..4267a0c457 100644
--- a/tsdb/engine/tsm1/cache.go
+++ b/tsdb/engine/tsm1/cache.go
@@ -168,7 +168,7 @@ const (
 
 // storer is the interface that descibes a cache's store.
 type storer interface {
-	entry(key []byte) (*entry, bool)        // Get an entry by its key.
+	entry(key []byte) *entry                // Get an entry by its key.
 	write(key []byte, values Values) error  // Write an entry to the store.
 	add(key []byte, entry *entry)           // Add a new entry to the store.
 	remove(key []byte)                      // Remove an entry from the store.
@@ -457,13 +457,13 @@ func (c *Cache) Values(key []byte) Values {
 	var snapshotEntries *entry
 
 	c.mu.RLock()
-	e, ok := c.store.entry(key)
+	e := c.store.entry(key)
 	if c.snapshot != nil {
-		snapshotEntries, _ = c.snapshot.store.entry(key)
+		snapshotEntries = c.snapshot.store.entry(key)
 	}
 	c.mu.RUnlock()
 
-	if !ok {
+	if e == nil {
 		if snapshotEntries == nil {
 			// No values in hot cache or snapshots.
 			return nil
@@ -524,8 +524,8 @@ func (c *Cache) DeleteRange(keys [][]byte, min, max int64) {
 
 	for _, k := range keys {
 		// Make sure key exist in the cache, skip if it does not
-		e, ok := c.store.entry(k)
-		if !ok {
+		e := c.store.entry(k)
+		if e == nil {
 			continue
 		}
 
@@ -559,7 +559,7 @@ func (c *Cache) SetMaxSize(size uint64) {
 // It doesn't lock the cache but it does read-lock the entry if there is one for the key.
 // values should only be used in compact.go in the CacheKeyIterator.
 func (c *Cache) values(key []byte) Values {
-	e, _ := c.store.entry(key)
+	e := c.store.entry(key)
 	if e == nil {
 		return nil
 	}
diff --git a/tsdb/engine/tsm1/cache_test.go b/tsdb/engine/tsm1/cache_test.go
index ffd77ad933..febbf1538c 100644
--- a/tsdb/engine/tsm1/cache_test.go
+++ b/tsdb/engine/tsm1/cache_test.go
@@ -788,7 +788,7 @@ func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
 // TestStore implements the storer interface and can be used to mock out a
 // Cache's storer implememation.
 type TestStore struct {
-	entryf  func(key []byte) (*entry, bool)
+	entryf  func(key []byte) *entry
 	writef  func(key []byte, values Values) error
 	addf    func(key []byte, entry *entry)
 	removef func(key []byte)
@@ -799,7 +799,7 @@ type TestStore struct {
 }
 
 func NewTestStore() *TestStore { return &TestStore{} }
-func (s *TestStore) entry(key []byte) (*entry, bool) { return s.entryf(key) }
+func (s *TestStore) entry(key []byte) *entry { return s.entryf(key) }
 func (s *TestStore) write(key []byte, values Values) error { return s.writef(key, values) }
 func (s *TestStore) add(key []byte, entry *entry) { s.addf(key, entry) }
 func (s *TestStore) remove(key []byte) { s.removef(key) }
diff --git a/tsdb/engine/tsm1/ring.go b/tsdb/engine/tsm1/ring.go
index ad64e6901f..9118d6fe22 100644
--- a/tsdb/engine/tsm1/ring.go
+++ b/tsdb/engine/tsm1/ring.go
@@ -97,7 +97,7 @@ func (r *ring) getPartition(key []byte) *partition {
 
 // entry returns the entry for the given key.
 // entry is safe for use by multiple goroutines.
-func (r *ring) entry(key []byte) (*entry, bool) {
+func (r *ring) entry(key []byte) *entry {
 	return r.getPartition(key).entry(key)
 }
 
@@ -212,11 +212,11 @@ type partition struct {
 
 // entry returns the partition's entry for the provided key.
 // It's safe for use by multiple goroutines.
-func (p *partition) entry(key []byte) (*entry, bool) {
+func (p *partition) entry(key []byte) *entry {
 	p.mu.RLock()
-	e, ok := p.store[string(key)]
+	e := p.store[string(key)]
 	p.mu.RUnlock()
-	return e, ok
+	return e
 }
 
 // write writes the values to the entry in the partition, creating the entry
@@ -224,9 +224,9 @@ func (p *partition) entry(key []byte) (*entry, bool) {
 // write is safe for use by multiple goroutines.
 func (p *partition) write(key []byte, values Values) error {
 	p.mu.RLock()
-	e, ok := p.store[string(key)]
+	e := p.store[string(key)]
 	p.mu.RUnlock()
-	if ok {
+	if e != nil {
 		// Hot path.
 		return e.add(values)
 	}
 
@@ -235,7 +235,7 @@ func (p *partition) write(key []byte, values Values) error {
 	defer p.mu.Unlock()
 
 	// Check again.
-	if e, ok = p.store[string(key)]; ok {
+	if e = p.store[string(key)]; e != nil {
 		return e.add(values)
 	}
 
diff --git a/tsdb/index/inmem/inmem.go b/tsdb/index/inmem/inmem.go
index a90022df18..e7ed91084a 100644
--- a/tsdb/index/inmem/inmem.go
+++ b/tsdb/index/inmem/inmem.go
@@ -767,7 +767,7 @@ func (i *Index) assignExistingSeries(shardID uint64, keys, names [][]byte, tagsS
 	i.mu.RLock()
 	var n int
 	for j, key := range keys {
-		if ss, ok := i.series[string(key)]; !ok {
+		if ss := i.series[string(key)]; ss == nil {
 			keys[n] = keys[j]
 			names[n] = names[j]
 			tagsSlice[n] = tagsSlice[j]
diff --git a/tsdb/index/inmem/meta.go b/tsdb/index/inmem/meta.go
index 0d62cc633d..93d6f60dd5 100644
--- a/tsdb/index/inmem/meta.go
+++ b/tsdb/index/inmem/meta.go
@@ -155,14 +155,14 @@ func (m *Measurement) HasTagKey(k string) bool {
 }
 
 func (m *Measurement) HasTagKeyValue(k, v []byte) bool {
+	var ok bool
 	m.mu.RLock()
-	if vals, ok := m.seriesByTagKeyValue[string(k)]; ok {
-		_, ok := vals[string(v)]
-		m.mu.RUnlock()
-		return ok
+	vals := m.seriesByTagKeyValue[string(k)]
+	if vals != nil {
+		_, ok = vals[string(v)]
 	}
 	m.mu.RUnlock()
-	return false
+	return ok
 }
 
 // HasSeries returns true if there is at least 1 series under this measurement.
@@ -198,7 +198,7 @@ func (m *Measurement) CardinalityBytes(key []byte) int {
 // It returns true if the series was added successfully or false if the series was already present.
 func (m *Measurement) AddSeries(s *Series) bool {
 	m.mu.RLock()
-	if _, ok := m.seriesByID[s.ID]; ok {
+	if m.seriesByID[s.ID] != nil {
 		m.mu.RUnlock()
 		return false
 	}
@@ -207,7 +207,7 @@ func (m *Measurement) AddSeries(s *Series) bool {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
-	if _, ok := m.seriesByID[s.ID]; ok {
+	if m.seriesByID[s.ID] != nil {
 		return false
 	}
 
@@ -244,6 +244,7 @@ func (m *Measurement) DropSeries(series *Series) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
+	// Existence check before delete here to clean up the caching/indexing only when needed
 	if _, ok := m.seriesByID[seriesID]; !ok {
 		return
 	}
@@ -357,8 +358,8 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que
 			tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags())
 		}
 
-		tagSet, ok := tagSets[string(tagsAsKey)]
-		if !ok {
+		tagSet := tagSets[string(tagsAsKey)]
+		if tagSet == nil {
 			// This TagSet is new, create a new entry for it.
 			tagSet = &query.TagSet{
 				Tags: nil,
@@ -1123,11 +1124,7 @@ func NewSeries(key []byte, tags models.Tags) *Series {
 }
 
 func (s *Series) AssignShard(shardID uint64) {
-	s.mu.RLock()
-	_, ok := s.shardIDs[shardID]
-	s.mu.RUnlock()
-
-	if ok {
+	if s.Assigned(shardID) {
 		return
 	}
 
@@ -1438,11 +1435,12 @@ func (m *Measurement) TagValues(key string) []string {
 // SetFieldName adds the field name to the measurement.
 func (m *Measurement) SetFieldName(name string) {
 	m.mu.RLock()
-	if _, ok := m.fieldNames[name]; ok {
-		m.mu.RUnlock()
+	_, ok := m.fieldNames[name]
+	m.mu.RUnlock()
+
+	if ok {
 		return
 	}
-	m.mu.RUnlock()
 
 	m.mu.Lock()
 	m.fieldNames[name] = struct{}{}