Merge pull request #8769 from influxdata/jl-map-cleanup
cleanup: remove poor usage of ',ok' with maps

commit 732a0c2eaa
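The change applies one pattern throughout: when a map's values are pointers (or another nilable type), indexing a missing key already yields nil, so the second ',ok' result carries no extra information and a plain nil check reads more directly. A minimal sketch of the before/after idiom, using a hypothetical counts map rather than any type from this diff:

package main

import "fmt"

type counter struct{ n int }

func main() {
	counts := map[string]*counter{}

	// Comma-ok form: the bool duplicates what the nil pointer already tells us.
	c, ok := counts["cpu"]
	if !ok {
		c = &counter{}
		counts["cpu"] = c
	}
	c.n++

	// Form adopted in this PR: rely on the nil zero value of the pointer.
	m := counts["mem"]
	if m == nil {
		m = &counter{}
		counts["mem"] = m
	}
	m.n++

	fmt.Println(counts["cpu"].n, counts["mem"].n) // 1 1
}

The comma-ok form is still required when the zero value is itself meaningful (for example map[string]int, where 0 could be a stored value), which is why lookups against set-valued maps such as m.fieldNames later in this diff keep it.
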
@@ -104,23 +104,23 @@ func (cmd *Command) Run(args ...string) error {
 			seriesKey, field := key[:sep], key[sep+4:]
 			measurement, tags := models.ParseKey(seriesKey)
 
-			measCount, ok := measCardinalities[measurement]
-			if !ok {
+			measCount := measCardinalities[measurement]
+			if measCount == nil {
 				measCount = hllpp.New()
 				measCardinalities[measurement] = measCount
 			}
 			measCount.Add([]byte(key))
 
-			fieldCount, ok := fieldCardinalities[measurement]
-			if !ok {
+			fieldCount := fieldCardinalities[measurement]
+			if fieldCount == nil {
 				fieldCount = hllpp.New()
 				fieldCardinalities[measurement] = fieldCount
 			}
 			fieldCount.Add([]byte(field))
 
 			for _, t := range tags {
-				tagCount, ok := tagCardinalities[string(t.Key)]
-				if !ok {
+				tagCount := tagCardinalities[string(t.Key)]
+				if tagCount == nil {
 					tagCount = hllpp.New()
 					tagCardinalities[string(t.Key)] = tagCount
 				}

@@ -1147,9 +1147,7 @@ func (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error)
 		for _, d := range other.Dimensions {
 			switch expr := d.Expr.(type) {
 			case *VarRef:
-				if _, ok := dimensionSet[expr.Val]; ok {
-					delete(dimensionSet, expr.Val)
-				}
+				delete(dimensionSet, expr.Val)
 			}
 		}
 	}

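Two hunks in this diff (the RewriteFields change above and the continuous_querier Service.Run change further down) drop an existence check that only guarded a delete. In Go, delete on a key that is absent from the map is a defined no-op, so the guard was pure overhead. A small standalone illustration, not code from this repository:

package main

import "fmt"

func main() {
	dimensionSet := map[string]struct{}{"host": {}}

	// Deleting an absent key is a no-op: no panic, no error, nothing to check first.
	delete(dimensionSet, "region")

	// Deleting a present key removes it as usual.
	delete(dimensionSet, "host")

	fmt.Println(len(dimensionSet)) // 0
}
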
@@ -21,7 +21,7 @@ func (t *ParseTree) With(fn func(*ParseTree)) {
 func (t *ParseTree) Group(tokens ...Token) *ParseTree {
 	for _, tok := range tokens {
 		// Look for the parse tree for this token.
-		if subtree, ok := t.Tokens[tok]; ok {
+		if subtree := t.Tokens[tok]; subtree != nil {
 			t = subtree
 			continue
 		}

@@ -67,12 +67,12 @@ func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) {
 func (t *ParseTree) Parse(p *Parser) (Statement, error) {
 	for {
 		tok, pos, lit := p.ScanIgnoreWhitespace()
-		if subtree, ok := t.Tokens[tok]; ok {
+		if subtree := t.Tokens[tok]; subtree != nil {
 			t = subtree
 			continue
 		}
 
-		if stmt, ok := t.Handlers[tok]; ok {
+		if stmt := t.Handlers[tok]; stmt != nil {
 			return stmt(p)
 		}
 

@@ -2638,8 +2638,8 @@ func (p *Parser) parseUnaryExpr() (Expr, error) {
 			return nil, errors.New("empty bound parameter")
 		}
 
-		v, ok := p.params[k]
-		if !ok {
+		v := p.params[k]
+		if v == nil {
 			return nil, fmt.Errorf("missing parameter: %s", k)
 		}
 

@@ -113,11 +113,13 @@ func (t *TaskManager) executeShowQueriesStatement(q *influxql.ShowQueriesStateme
 	}}, nil
 }
 
-func (t *TaskManager) query(qid uint64) (*QueryTask, bool) {
+func (t *TaskManager) queryError(qid uint64, err error) {
 	t.mu.RLock()
-	query, ok := t.queries[qid]
+	query := t.queries[qid]
 	t.mu.RUnlock()
-	return query, ok
+	if query != nil {
+		query.setError(err)
+	}
 }
 
 // AttachQuery attaches a running query to be managed by the TaskManager.

@@ -174,8 +176,8 @@ func (t *TaskManager) KillQuery(qid uint64) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
-	query, ok := t.queries[qid]
-	if !ok {
+	query := t.queries[qid]
+	if query == nil {
 		return fmt.Errorf("no such query id: %d", qid)
 	}
 

@@ -220,27 +222,15 @@ func (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closin
 
 	select {
 	case <-closing:
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(ErrQueryInterrupted)
+		t.queryError(qid, ErrQueryInterrupted)
 	case err := <-monitorCh:
 		if err == nil {
 			break
 		}
 
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(err)
+		t.queryError(qid, err)
 	case <-timerCh:
-		query, ok := t.query(qid)
-		if !ok {
-			break
-		}
-		query.setError(ErrQueryTimeoutLimitExceeded)
+		t.queryError(qid, ErrQueryTimeoutLimitExceeded)
 	case <-interrupt:
 		// Query was manually closed so exit the select.
 		return

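The two TaskManager hunks above go a step further than swapping the lookup style: the query accessor, which returned (*QueryTask, bool), is replaced by a queryError helper that both looks up the task and sets the error, so each branch of the select in waitForQuery collapses from five lines to one call. A sketch of the shape of that refactor, with a simplified task type standing in for the real QueryTask:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type task struct{ err error }

func (t *task) setError(err error) { t.err = err }

type manager struct {
	mu    sync.RWMutex
	tasks map[uint64]*task
}

// taskError mirrors the new queryError helper: look up under the read lock,
// then act only if the entry actually exists.
func (m *manager) taskError(id uint64, err error) {
	m.mu.RLock()
	t := m.tasks[id]
	m.mu.RUnlock()
	if t != nil {
		t.setError(err)
	}
}

func main() {
	m := &manager{tasks: map[uint64]*task{1: {}}}
	m.taskError(1, errors.New("interrupted"))
	m.taskError(2, errors.New("ignored")) // unknown id: silently a no-op
	fmt.Println(m.tasks[1].err)
}
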
@@ -198,9 +198,7 @@ func (s *Service) Run(database, name string, t time.Time) error {
 			if name == "" || cq.Name == name {
 				// Remove the last run time for the CQ
 				id := fmt.Sprintf("%s%s%s", db.Name, idDelimiter, cq.Name)
-				if _, ok := s.lastRuns[id]; ok {
-					delete(s.lastRuns, id)
-				}
+				delete(s.lastRuns, id)
 			}
 		}
 	}

@@ -779,7 +779,7 @@ func (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 
 	first := true
-	if val, ok := diags["system"]; ok {
+	if val := diags["system"]; val != nil {
 		jv, err := parseSystemDiagnostics(val)
 		if err != nil {
 			h.httpError(w, err.Error(), http.StatusInternalServerError)

@@ -575,7 +575,7 @@ func TestHandler_Version(t *testing.T) {
 	for _, test := range tests {
 		w := httptest.NewRecorder()
 		h.ServeHTTP(w, MustNewRequest(test.method, test.endpoint, test.body))
-		if v, ok := w.HeaderMap["X-Influxdb-Version"]; ok {
+		if v := w.HeaderMap["X-Influxdb-Version"]; len(v) > 0 {
 			if v[0] != "0.0.0" {
 				t.Fatalf("unexpected version: %s", v)
 			}

@@ -583,7 +583,7 @@ func TestHandler_Version(t *testing.T) {
 			t.Fatalf("Header entry 'X-Influxdb-Version' not present")
 		}
 
-		if v, ok := w.HeaderMap["X-Influxdb-Build"]; ok {
+		if v := w.HeaderMap["X-Influxdb-Build"]; len(v) > 0 {
 			if v[0] != "OSS" {
 				t.Fatalf("unexpected BuildType: %s", v)
 			}

@@ -47,16 +47,16 @@ func (p *RequestProfile) AddQuery(info RequestInfo) {
 func (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {
 	// Look for a request entry for this request.
 	p.mu.RLock()
-	st, ok := p.Requests[info]
+	st := p.Requests[info]
 	p.mu.RUnlock()
-	if ok {
+	if st != nil {
 		fn(st)
 		return
 	}
 
 	// There is no entry in the request tracker. Create one.
 	p.mu.Lock()
-	if st, ok := p.Requests[info]; ok {
+	if st := p.Requests[info]; st != nil {
 		// Something else created this entry while we were waiting for the lock.
 		p.mu.Unlock()
 		fn(st)

@@ -1557,8 +1557,8 @@ func (leases *Leases) Acquire(name string, nodeID uint64) (*Lease, error) {
 	leases.mu.Lock()
 	defer leases.mu.Unlock()
 
-	l, ok := leases.m[name]
-	if ok {
+	l := leases.m[name]
+	if l != nil {
 		if time.Now().After(l.Expiration) || l.Owner == nodeID {
 			l.Expiration = time.Now().Add(leases.d)
 			l.Owner = nodeID

@@ -168,7 +168,7 @@ const (
 
 // storer is the interface that descibes a cache's store.
 type storer interface {
-	entry(key []byte) (*entry, bool) // Get an entry by its key.
+	entry(key []byte) *entry // Get an entry by its key.
 	write(key []byte, values Values) error // Write an entry to the store.
 	add(key []byte, entry *entry) // Add a new entry to the store.
 	remove(key []byte) // Remove an entry from the store.

@@ -457,13 +457,13 @@ func (c *Cache) Values(key []byte) Values {
 	var snapshotEntries *entry
 
 	c.mu.RLock()
-	e, ok := c.store.entry(key)
+	e := c.store.entry(key)
 	if c.snapshot != nil {
-		snapshotEntries, _ = c.snapshot.store.entry(key)
+		snapshotEntries = c.snapshot.store.entry(key)
 	}
 	c.mu.RUnlock()
 
-	if !ok {
+	if e == nil {
 		if snapshotEntries == nil {
 			// No values in hot cache or snapshots.
 			return nil

@@ -524,8 +524,8 @@ func (c *Cache) DeleteRange(keys [][]byte, min, max int64) {
 
 	for _, k := range keys {
 		// Make sure key exist in the cache, skip if it does not
-		e, ok := c.store.entry(k)
-		if !ok {
+		e := c.store.entry(k)
+		if e == nil {
 			continue
 		}
 

@@ -559,7 +559,7 @@ func (c *Cache) SetMaxSize(size uint64) {
 // It doesn't lock the cache but it does read-lock the entry if there is one for the key.
 // values should only be used in compact.go in the CacheKeyIterator.
 func (c *Cache) values(key []byte) Values {
-	e, _ := c.store.entry(key)
+	e := c.store.entry(key)
 	if e == nil {
 		return nil
 	}

@@ -788,7 +788,7 @@ func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {
 // TestStore implements the storer interface and can be used to mock out a
 // Cache's storer implememation.
 type TestStore struct {
-	entryf func(key []byte) (*entry, bool)
+	entryf func(key []byte) *entry
 	writef func(key []byte, values Values) error
 	addf func(key []byte, entry *entry)
 	removef func(key []byte)

@@ -799,7 +799,7 @@ type TestStore struct {
 }
 
 func NewTestStore() *TestStore { return &TestStore{} }
-func (s *TestStore) entry(key []byte) (*entry, bool) { return s.entryf(key) }
+func (s *TestStore) entry(key []byte) *entry { return s.entryf(key) }
 func (s *TestStore) write(key []byte, values Values) error { return s.writef(key, values) }
 func (s *TestStore) add(key []byte, entry *entry) { s.addf(key, entry) }
 func (s *TestStore) remove(key []byte) { s.removef(key) }

@@ -97,7 +97,7 @@ func (r *ring) getPartition(key []byte) *partition {
 
 // entry returns the entry for the given key.
 // entry is safe for use by multiple goroutines.
-func (r *ring) entry(key []byte) (*entry, bool) {
+func (r *ring) entry(key []byte) *entry {
 	return r.getPartition(key).entry(key)
 }
 

@@ -212,11 +212,11 @@ type partition struct {
 
 // entry returns the partition's entry for the provided key.
 // It's safe for use by multiple goroutines.
-func (p *partition) entry(key []byte) (*entry, bool) {
+func (p *partition) entry(key []byte) *entry {
 	p.mu.RLock()
-	e, ok := p.store[string(key)]
+	e := p.store[string(key)]
 	p.mu.RUnlock()
-	return e, ok
+	return e
 }
 
 // write writes the values to the entry in the partition, creating the entry

@@ -224,9 +224,9 @@ func (p *partition) entry(key []byte) (*entry, bool) {
 // write is safe for use by multiple goroutines.
 func (p *partition) write(key []byte, values Values) error {
 	p.mu.RLock()
-	e, ok := p.store[string(key)]
+	e := p.store[string(key)]
 	p.mu.RUnlock()
-	if ok {
+	if e != nil {
 		// Hot path.
 		return e.add(values)
 	}

@@ -235,7 +235,7 @@ func (p *partition) write(key []byte, values Values) error {
 	defer p.mu.Unlock()
 
 	// Check again.
-	if e, ok = p.store[string(key)]; ok {
+	if e = p.store[string(key)]; e != nil {
 		return e.add(values)
 	}
 

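The partition.write hunks keep the original double-checked locking: an optimistic lookup under the read lock, then a second lookup after taking the write lock in case another goroutine created the entry in the meantime; only the lookup style changes from comma-ok to a nil check. A compact, self-contained sketch of that structure using a placeholder store type, not the real partition:

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	mu     sync.Mutex
	values []int
}

func (e *entry) add(vs []int) {
	e.mu.Lock()
	e.values = append(e.values, vs...)
	e.mu.Unlock()
}

type store struct {
	mu sync.RWMutex
	m  map[string]*entry
}

func (s *store) write(key string, vs []int) {
	// Hot path: the entry usually exists already.
	s.mu.RLock()
	e := s.m[key]
	s.mu.RUnlock()
	if e != nil {
		e.add(vs)
		return
	}

	// Slow path: take the write lock and check again before creating.
	s.mu.Lock()
	defer s.mu.Unlock()
	if e = s.m[key]; e != nil {
		e.add(vs)
		return
	}
	e = &entry{}
	e.add(vs)
	s.m[key] = e
}

func main() {
	s := &store{m: map[string]*entry{}}
	s.write("cpu", []int{1, 2})
	s.write("cpu", []int{3})
	fmt.Println(s.m["cpu"].values) // [1 2 3]
}
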
@@ -767,7 +767,7 @@ func (i *Index) assignExistingSeries(shardID uint64, keys, names [][]byte, tagsS
 	i.mu.RLock()
 	var n int
 	for j, key := range keys {
-		if ss, ok := i.series[string(key)]; !ok {
+		if ss := i.series[string(key)]; ss == nil {
 			keys[n] = keys[j]
 			names[n] = names[j]
 			tagsSlice[n] = tagsSlice[j]

@@ -155,14 +155,14 @@ func (m *Measurement) HasTagKey(k string) bool {
 }
 
 func (m *Measurement) HasTagKeyValue(k, v []byte) bool {
+	var ok bool
 	m.mu.RLock()
-	if vals, ok := m.seriesByTagKeyValue[string(k)]; ok {
-		_, ok := vals[string(v)]
-		m.mu.RUnlock()
-		return ok
+	vals := m.seriesByTagKeyValue[string(k)]
+	if vals != nil {
+		_, ok = vals[string(v)]
 	}
 	m.mu.RUnlock()
-	return false
+	return ok
 }
 
 // HasSeries returns true if there is at least 1 series under this measurement.

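Assembled from the hunk above: the rewritten HasTagKeyValue declares ok before taking the read lock, so every path goes through exactly one RUnlock instead of the two unlock sites the old version needed. The Measurement type itself is not part of this diff, so the sketch below uses a simplified stand-in (the real seriesByTagKeyValue maps to series sets rather than bare struct{} values):

package main

import (
	"fmt"
	"sync"
)

type measurement struct {
	mu                  sync.RWMutex
	seriesByTagKeyValue map[string]map[string]struct{} // simplified value type
}

// hasTagKeyValue follows the rewritten control flow: one read lock, one unlock,
// and a single return value computed along the way.
func (m *measurement) hasTagKeyValue(k, v []byte) bool {
	var ok bool
	m.mu.RLock()
	vals := m.seriesByTagKeyValue[string(k)]
	if vals != nil {
		_, ok = vals[string(v)]
	}
	m.mu.RUnlock()
	return ok
}

func main() {
	m := &measurement{seriesByTagKeyValue: map[string]map[string]struct{}{
		"host": {"server01": {}},
	}}
	fmt.Println(m.hasTagKeyValue([]byte("host"), []byte("server01"))) // true
	fmt.Println(m.hasTagKeyValue([]byte("host"), []byte("server02"))) // false
	fmt.Println(m.hasTagKeyValue([]byte("region"), []byte("west")))   // false
}
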
@@ -198,7 +198,7 @@ func (m *Measurement) CardinalityBytes(key []byte) int {
 // It returns true if the series was added successfully or false if the series was already present.
 func (m *Measurement) AddSeries(s *Series) bool {
 	m.mu.RLock()
-	if _, ok := m.seriesByID[s.ID]; ok {
+	if m.seriesByID[s.ID] != nil {
 		m.mu.RUnlock()
 		return false
 	}

@@ -207,7 +207,7 @@ func (m *Measurement) AddSeries(s *Series) bool {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
-	if _, ok := m.seriesByID[s.ID]; ok {
+	if m.seriesByID[s.ID] != nil {
 		return false
 	}
 

@@ -244,6 +244,7 @@ func (m *Measurement) DropSeries(series *Series) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
+	// Existence check before delete here to clean up the caching/indexing only when needed
 	if _, ok := m.seriesByID[seriesID]; !ok {
 		return
 	}

@@ -357,8 +358,8 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que
 			tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags())
 		}
 
-		tagSet, ok := tagSets[string(tagsAsKey)]
-		if !ok {
+		tagSet := tagSets[string(tagsAsKey)]
+		if tagSet == nil {
 			// This TagSet is new, create a new entry for it.
 			tagSet = &query.TagSet{
 				Tags: nil,

@@ -1123,11 +1124,7 @@ func NewSeries(key []byte, tags models.Tags) *Series {
 }
 
 func (s *Series) AssignShard(shardID uint64) {
-	s.mu.RLock()
-	_, ok := s.shardIDs[shardID]
-	s.mu.RUnlock()
-
-	if ok {
+	if s.Assigned(shardID) {
 		return
 	}
 

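The AssignShard hunk above folds the read-locked comma-ok membership test into a call to s.Assigned, whose body is not shown in this diff. Presumably it performs the same read-locked set lookup; the sketch below is a guess at that shape, not the actual influxdb implementation, and it also shows a case where comma-ok remains the right tool because the struct{} value carries no information of its own:

package main

import (
	"fmt"
	"sync"
)

type series struct {
	mu       sync.RWMutex
	shardIDs map[uint64]struct{}
}

// assigned is an assumed equivalent of Series.Assigned: a membership test on a
// set-valued map, where the comma-ok form is still required.
func (s *series) assigned(shardID uint64) bool {
	s.mu.RLock()
	_, ok := s.shardIDs[shardID]
	s.mu.RUnlock()
	return ok
}

func main() {
	s := &series{shardIDs: map[uint64]struct{}{1: {}}}
	fmt.Println(s.assigned(1), s.assigned(2)) // true false
}
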
@@ -1438,11 +1435,12 @@ func (m *Measurement) TagValues(key string) []string {
 // SetFieldName adds the field name to the measurement.
 func (m *Measurement) SetFieldName(name string) {
 	m.mu.RLock()
-	if _, ok := m.fieldNames[name]; ok {
-		m.mu.RUnlock()
+	_, ok := m.fieldNames[name]
+	m.mu.RUnlock()
+
+	if ok {
 		return
 	}
-	m.mu.RUnlock()
 
 	m.mu.Lock()
 	m.fieldNames[name] = struct{}{}